diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c85e92ec20..e54b1db265 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: distribution: zulu java-version: ${{ matrix.java }} - name: Cache Maven packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.m2/repository diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8dc3602146..3de9762eea 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -62,7 +62,7 @@ jobs: # Cache downloaded Maven dependencies - name: Cache Maven packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.m2/repository diff --git a/CHANGELOG.md b/CHANGELOG.md index dbfda3be02..7acdd7090f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,18 +2,38 @@ ## [Unreleased](https://github.com/aklivity/zilla/tree/HEAD) -[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.65...HEAD) +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.66...HEAD) + +**Implemented enhancements:** + +- Use `model` and `view` when describing the message type [\#750](https://github.com/aklivity/zilla/issues/750) +- Support obtaining `protobuf` schemas from `schema registry` for `grpc` services [\#697](https://github.com/aklivity/zilla/issues/697) +- Support idempotent `mqtt` `qos 2` publish to `kafka` [\#677](https://github.com/aklivity/zilla/issues/677) +- Detect and inspect invalid messages received [\#676](https://github.com/aklivity/zilla/issues/676) +- Support incremental validation of fragmented messages sent by client [\#671](https://github.com/aklivity/zilla/issues/671) **Fixed bugs:** -- Schema validation fails before the `${{env.*}}` parameters have been removed [\#583](https://github.com/aklivity/zilla/issues/583) +- TLSv1.3 client handshake stall [\#791](https://github.com/aklivity/zilla/issues/791) +- Zilla crashes when it tries to send flush on retain stream [\#770](https://github.com/aklivity/zilla/issues/770) +- Running emqtt\_bench triggers exception in connection pool [\#716](https://github.com/aklivity/zilla/issues/716) +- `mqtt-kafka` does not limit client sharding to `mqtt v5` [\#708](https://github.com/aklivity/zilla/issues/708) +- `tls binding` should handle `null` key returned from `vault` [\#395](https://github.com/aklivity/zilla/issues/395) -**Closed issues:** +## [0.9.66](https://github.com/aklivity/zilla/tree/0.9.66) (2024-01-24) + +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.65...0.9.66) + +**Implemented enhancements:** - Support `openapi` `http` response validation [\#684](https://github.com/aklivity/zilla/issues/684) - Support `protobuf` conversion to and from `json` for `kafka` messages [\#682](https://github.com/aklivity/zilla/issues/682) - Support incubator features preview in zilla release docker image [\#670](https://github.com/aklivity/zilla/issues/670) +**Fixed bugs:** + +- Schema validation fails before the `${{env.*}}` parameters have been removed [\#583](https://github.com/aklivity/zilla/issues/583) + **Merged pull requests:** - update license exclude path to include both zpmw files [\#759](https://github.com/aklivity/zilla/pull/759) ([vordimous](https://github.com/vordimous)) @@ -29,6 +49,10 @@ **Implemented enhancements:** +- Support `avro` conversion to and from `json` for `kafka` messages [\#681](https://github.com/aklivity/zilla/issues/681) +- Support observability of zilla engine internal streams [\#678](https://github.com/aklivity/zilla/issues/678) +- 
Simplify configuration of multiple protocols on different tcp ports [\#669](https://github.com/aklivity/zilla/issues/669) +- Simplify kafka client bootstrap server names and ports config [\#619](https://github.com/aklivity/zilla/issues/619) - MQTT publish QoS 2 as Kafka produce with acks in\_sync\_replicas and idempotent `producerId` [\#605](https://github.com/aklivity/zilla/issues/605) - Add the option to route by `port` in the `tls` binding [\#564](https://github.com/aklivity/zilla/issues/564) - Support outbound message transformation from `protobuf` to `json` [\#458](https://github.com/aklivity/zilla/issues/458) @@ -59,10 +83,6 @@ **Closed issues:** - Prototype composite binding support with nested namespaces [\#685](https://github.com/aklivity/zilla/issues/685) -- Support `avro` conversion to and from `json` for `kafka` messages [\#681](https://github.com/aklivity/zilla/issues/681) -- Support observability of zilla engine internal streams [\#678](https://github.com/aklivity/zilla/issues/678) -- Simplify configuration of multiple protocols on different tcp ports [\#669](https://github.com/aklivity/zilla/issues/669) -- Simplify kafka client bootstrap server names and ports config [\#619](https://github.com/aklivity/zilla/issues/619) - Build has been failed in local [\#229](https://github.com/aklivity/zilla/issues/229) **Merged pull requests:** diff --git a/build/flyweight-maven-plugin/pom.xml b/build/flyweight-maven-plugin/pom.xml index 0debf12a8f..f1e3f15dba 100644 --- a/build/flyweight-maven-plugin/pom.xml +++ b/build/flyweight-maven-plugin/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla build - 0.9.66 + 0.9.67 ../pom.xml diff --git a/build/pom.xml b/build/pom.xml index fe55a455ae..154048bda5 100644 --- a/build/pom.xml +++ b/build/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 25f346b970..f9ed5c8295 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla cloud - 0.9.66 + 0.9.67 ../pom.xml @@ -249,19 +249,25 @@ ${project.groupId} - validator-avro + model-avro ${project.version} runtime ${project.groupId} - validator-core + model-core ${project.version} runtime ${project.groupId} - validator-json + model-json + ${project.version} + runtime + + + ${project.groupId} + model-protobuf ${project.version} runtime diff --git a/cloud/docker-image/src/main/docker/Dockerfile b/cloud/docker-image/src/main/docker/Dockerfile index 32d8f73f8b..876a1c77af 100644 --- a/cloud/docker-image/src/main/docker/Dockerfile +++ b/cloud/docker-image/src/main/docker/Dockerfile @@ -15,14 +15,12 @@ FROM eclipse-temurin:21-jdk AS build -RUN apt update && apt install -y gettext - COPY maven /root/.m2/repository COPY zpmw zpmw COPY zpm.json.template zpm.json.template -RUN cat zpm.json.template | env VERSION=${project.version} envsubst > zpm.json +RUN cat zpm.json.template | sed "s/\${VERSION}/${project.version}/g" | tee zpm.json RUN ./zpmw install --debug --exclude-remote-repositories RUN ./zpmw clean --keep-image diff --git a/cloud/docker-image/src/main/docker/assembly.xml b/cloud/docker-image/src/main/docker/assembly.xml index c67d7d02c0..ca3954717b 100644 --- a/cloud/docker-image/src/main/docker/assembly.xml +++ b/cloud/docker-image/src/main/docker/assembly.xml @@ -32,8 +32,8 @@ io/aklivity/zilla/exporter-*/** io/aklivity/zilla/guard-*/** io/aklivity/zilla/metrics-*/** + io/aklivity/zilla/model-*/** io/aklivity/zilla/resolver-*/** - io/aklivity/zilla/validator-*/** 
io/aklivity/zilla/vault-*/** io/aklivity/zilla/command/** io/aklivity/zilla/command-*/** @@ -63,6 +63,8 @@ com/fasterxml/jackson/** org/yaml/snakeyaml/** org/junit/** + com/google/** + org/checkerframework/** diff --git a/cloud/docker-image/src/main/docker/zpm.json.template b/cloud/docker-image/src/main/docker/zpm.json.template index 0abc7656a5..427e9b3f99 100644 --- a/cloud/docker-image/src/main/docker/zpm.json.template +++ b/cloud/docker-image/src/main/docker/zpm.json.template @@ -49,10 +49,11 @@ "io.aklivity.zilla:metrics-stream", "io.aklivity.zilla:metrics-http", "io.aklivity.zilla:metrics-grpc", + "io.aklivity.zilla:model-avro", + "io.aklivity.zilla:model-core", + "io.aklivity.zilla:model-json", + "io.aklivity.zilla:model-protobuf", "io.aklivity.zilla:resolver-env", - "io.aklivity.zilla:validator-avro", - "io.aklivity.zilla:validator-core", - "io.aklivity.zilla:validator-json", "io.aklivity.zilla:vault-filesystem", "org.slf4j:slf4j-simple", "org.antlr:antlr4-runtime" diff --git a/cloud/helm-chart/pom.xml b/cloud/helm-chart/pom.xml index 8bbbbfdca1..e29d415610 100644 --- a/cloud/helm-chart/pom.xml +++ b/cloud/helm-chart/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla cloud - 0.9.66 + 0.9.67 ../pom.xml diff --git a/cloud/pom.xml b/cloud/pom.xml index 27a2230114..f36e49a984 100644 --- a/cloud/pom.xml +++ b/cloud/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/conf/pom.xml b/conf/pom.xml index 434285c312..1395067ed1 100644 --- a/conf/pom.xml +++ b/conf/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/binding-amqp.spec/pom.xml b/incubator/binding-amqp.spec/pom.xml index d1d14e2647..7193406bd7 100644 --- a/incubator/binding-amqp.spec/pom.xml +++ b/incubator/binding-amqp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/binding-amqp/pom.xml b/incubator/binding-amqp/pom.xml index 7f5ccd71c7..e0e8777ebf 100644 --- a/incubator/binding-amqp/pom.xml +++ b/incubator/binding-amqp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/catalog-inline.spec/pom.xml b/incubator/catalog-inline.spec/pom.xml index b8538b226d..8c578ec4f3 100644 --- a/incubator/catalog-inline.spec/pom.xml +++ b/incubator/catalog-inline.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/catalog-inline/pom.xml b/incubator/catalog-inline/pom.xml index 2748b9e4c2..d51dab34c6 100644 --- a/incubator/catalog-inline/pom.xml +++ b/incubator/catalog-inline/pom.xml @@ -6,7 +6,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/catalog-inline/src/test/java/io/aklivity/zilla/runtime/catalog/inline/internal/InlineIT.java b/incubator/catalog-inline/src/test/java/io/aklivity/zilla/runtime/catalog/inline/internal/InlineIT.java index c7d02a0577..b979d6281b 100644 --- a/incubator/catalog-inline/src/test/java/io/aklivity/zilla/runtime/catalog/inline/internal/InlineIT.java +++ b/incubator/catalog-inline/src/test/java/io/aklivity/zilla/runtime/catalog/inline/internal/InlineIT.java @@ -20,11 +20,15 @@ import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertEquals; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; import org.junit.Before; import org.junit.Test; import io.aklivity.zilla.runtime.catalog.inline.config.InlineOptionsConfig; import io.aklivity.zilla.runtime.catalog.inline.config.InlineSchemaConfig; 
+import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public class InlineIT { @@ -55,4 +59,39 @@ public void shouldResolveSchemaViaSchemaId() assertThat(schema, not(nullValue())); assertEquals(expected, schema); } + + @Test + public void shouldResolveSchemaIdAndProcessData() + { + InlineCatalogHandler catalog = new InlineCatalogHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + int valLength = catalog.decode(data, 0, data.capacity(), ValueConsumer.NOP, CatalogHandler.Decoder.IDENTITY); + + assertEquals(data.capacity(), valLength); + } + + @Test + public void shouldVerifyEncodedData() + { + InlineCatalogHandler catalog = new InlineCatalogHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + assertEquals(13, catalog.encode(1, data, 0, data.capacity(), + ValueConsumer.NOP, CatalogHandler.Encoder.IDENTITY)); + } } diff --git a/incubator/catalog-schema-registry.spec/pom.xml b/incubator/catalog-schema-registry.spec/pom.xml index 15425da0e6..cc854fcc9e 100644 --- a/incubator/catalog-schema-registry.spec/pom.xml +++ b/incubator/catalog-schema-registry.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/config/catalog.yaml b/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/config/catalog.yaml index c39f8ae0f4..1ed998e6f0 100644 --- a/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/config/catalog.yaml +++ b/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/config/catalog.yaml @@ -21,3 +21,4 @@ catalogs: options: url: http://localhost:8081 context: default + max-age: 30 diff --git a/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/schema/schema.registry.schema.patch.json b/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/schema/schema.registry.schema.patch.json index 085fa92996..2864109bc7 100644 --- a/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/schema/schema.registry.schema.patch.json +++ b/incubator/catalog-schema-registry.spec/src/main/scripts/io/aklivity/zilla/specs/catalog/schema/registry/schema/schema.registry.schema.patch.json @@ -39,6 +39,12 @@ { "type": "string", "default": "default" + }, + "max-age": + { + "title": "Max Age", + "type": "number", + "default": 300 } }, "additionalProperties": false diff --git a/incubator/catalog-schema-registry/pom.xml b/incubator/catalog-schema-registry/pom.xml index d932582ca8..bf3457a44f 100644 --- a/incubator/catalog-schema-registry/pom.xml +++ b/incubator/catalog-schema-registry/pom.xml @@ -6,7 +6,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml @@ -75,6 +75,22 @@ com.mycila license-maven-plugin + + ${project.groupId} + flyweight-maven-plugin + ${project.version} + + internal + io.aklivity.zilla.runtime.catalog.schema.registry.internal.types + + + + + generate + + + + 
maven-checkstyle-plugin @@ -125,6 +141,9 @@ org.jacoco jacoco-maven-plugin + + io/aklivity/zilla/runtime/catalog/schema/registry/internal/types/**/*.class + BUNDLE diff --git a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/CachedSchemaId.java b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/CachedSchemaId.java new file mode 100644 index 0000000000..82ce19d4b6 --- /dev/null +++ b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/CachedSchemaId.java @@ -0,0 +1,29 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.catalog.schema.registry.internal; + +public class CachedSchemaId +{ + public long timestamp; + public int id; + + public CachedSchemaId( + long timestamp, + int id) + { + this.timestamp = timestamp; + this.id = id; + } +} diff --git a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogHandler.java b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogHandler.java index 28376e90a2..c8bc750709 100644 --- a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogHandler.java +++ b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogHandler.java @@ -18,27 +18,39 @@ import java.net.http.HttpClient; import java.net.http.HttpRequest; import java.net.http.HttpResponse; +import java.nio.ByteOrder; import java.text.MessageFormat; import java.util.zip.CRC32C; +import org.agrona.BitUtil; +import org.agrona.DirectBuffer; import org.agrona.collections.Int2ObjectCache; +import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.catalog.schema.registry.internal.config.SchemaRegistryOptionsConfig; import io.aklivity.zilla.runtime.catalog.schema.registry.internal.serializer.RegisterSchemaRequest; +import io.aklivity.zilla.runtime.catalog.schema.registry.internal.types.SchemaRegistryPrefixFW; import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public class SchemaRegistryCatalogHandler implements CatalogHandler { private static final String SUBJECT_VERSION_PATH = "/subjects/{0}/versions/{1}"; private static final String SCHEMA_PATH = "/schemas/ids/{0}"; private static final String REGISTER_SCHEMA_PATH = "/subjects/{0}/versions"; + private static final int MAX_PADDING_LENGTH = 5; + private static final byte MAGIC_BYTE = 0x0; + + private final SchemaRegistryPrefixFW.Builder prefixRW = new SchemaRegistryPrefixFW.Builder() + .wrap(new UnsafeBuffer(new byte[5]), 0, 5); private final HttpClient client; private final String baseUrl; private final 
RegisterSchemaRequest request;
private final CRC32C crc32c;
- private final Int2ObjectCache<String> cache;
- private final Int2ObjectCache<String> schemaIdCache;
+ private final Int2ObjectCache<String> schemas;
+ private final Int2ObjectCache<CachedSchemaId> schemaIds;
+ private final long maxAgeMillis;
public SchemaRegistryCatalogHandler(
SchemaRegistryOptionsConfig config)
@@ -47,8 +59,9 @@ public SchemaRegistryCatalogHandler(
this.client = HttpClient.newHttpClient();
this.request = new RegisterSchemaRequest();
this.crc32c = new CRC32C();
- this.cache = new Int2ObjectCache<>(1, 1024, i -> {});
- this.schemaIdCache = new Int2ObjectCache<>(1, 1024, i -> {});
+ this.schemas = new Int2ObjectCache<>(1, 1024, i -> {});
+ this.schemaIds = new Int2ObjectCache<>(1, 1024, i -> {});
+ this.maxAgeMillis = config.maxAge.toMillis();
}
@Override
@@ -69,7 +82,7 @@ public int register(
schemaId = response.statusCode() == 200 ? request.resolveResponse(response.body()) : NO_SCHEMA_ID;
if (schemaId != NO_SCHEMA_ID)
{
- cache.put(schemaId, schema);
+ schemas.put(schemaId, schema);
}
}
catch (Exception ex)
@@ -84,9 +97,9 @@ public String resolve(
int schemaId)
{
String schema;
- if (cache.containsKey(schemaId))
+ if (schemas.containsKey(schemaId))
{
- schema = cache.get(schemaId);
+ schema = schemas.get(schemaId);
}
else
{
@@ -94,7 +107,7 @@ public String resolve(
schema = response != null ? request.resolveSchemaResponse(response) : null;
if (schema != null)
{
- cache.put(schemaId, schema);
+ schemas.put(schemaId, schema);
}
}
return schema;
@@ -108,9 +121,10 @@ public int resolve(
int schemaId;
int checkSum = generateCRC32C(subject, version);
- if (schemaIdCache.containsKey(checkSum))
+ if (schemaIds.containsKey(checkSum) &&
+ (System.currentTimeMillis() - schemaIds.get(checkSum).timestamp) < maxAgeMillis)
{
- schemaId = Integer.parseInt(schemaIdCache.get(checkSum));
+ schemaId = schemaIds.get(checkSum).id;
}
else
{
@@ -118,12 +132,72 @@ public int resolve(
schemaId = response != null ? request.resolveResponse(response) : NO_SCHEMA_ID;
if (schemaId != NO_SCHEMA_ID)
{
- schemaIdCache.put(checkSum, String.valueOf(schemaId));
+ schemaIds.put(checkSum, new CachedSchemaId(System.currentTimeMillis(), schemaId));
}
}
return schemaId;
}
+
+ // wire-format prefix: 1-byte magic 0x00 followed by a 4-byte big-endian schema id
+ @Override
+ public int resolve(
+ DirectBuffer data,
+ int index,
+ int length)
+ {
+ int schemaId = NO_SCHEMA_ID;
+ if (data.getByte(index) == MAGIC_BYTE)
+ {
+ schemaId = data.getInt(index + BitUtil.SIZE_OF_BYTE, ByteOrder.BIG_ENDIAN);
+ }
+ return schemaId;
+ }
+
+ @Override
+ public int decode(
+ DirectBuffer data,
+ int index,
+ int length,
+ ValueConsumer next,
+ Decoder decoder)
+ {
+ int schemaId = NO_SCHEMA_ID;
+ int progress = 0;
+ int valLength = -1;
+ if (data.getByte(index) == MAGIC_BYTE)
+ {
+ progress += BitUtil.SIZE_OF_BYTE;
+ schemaId = data.getInt(index + progress, ByteOrder.BIG_ENDIAN);
+ progress += BitUtil.SIZE_OF_INT;
+ }
+
+ if (schemaId > NO_SCHEMA_ID)
+ {
+ valLength = decoder.accept(schemaId, data, index + progress, length - progress, next);
+ }
+ return valLength;
+ }
+
+ @Override
+ public int encode(
+ int schemaId,
+ DirectBuffer data,
+ int index,
+ int length,
+ ValueConsumer next,
+ Encoder encoder)
+ {
+ SchemaRegistryPrefixFW prefix = prefixRW.rewrap().schemaId(schemaId).build();
+ next.accept(prefix.buffer(), prefix.offset(), prefix.sizeof());
+ int valLength = encoder.accept(schemaId, data, index, length, next);
+ return valLength > 0 ?
prefix.sizeof() + valLength : -1;
+ }
+
+ @Override
+ public int encodePadding()
+ {
+ return MAX_PADDING_LENGTH;
+ }
+
private String sendHttpRequest(
String path)
{
diff --git a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfig.java b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfig.java
index 9febe26531..eabefbf822 100644
--- a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfig.java
+++ b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfig.java
@@ -14,6 +14,7 @@
*/
package io.aklivity.zilla.runtime.catalog.schema.registry.internal.config;
+import java.time.Duration;
import java.util.function.Function;
import io.aklivity.zilla.runtime.engine.config.OptionsConfig;
@@ -22,6 +23,7 @@ public class SchemaRegistryOptionsConfig extends OptionsConfig
{
public final String url;
public final String context;
+ public final Duration maxAge;
public static SchemaRegistryOptionsConfigBuilder<SchemaRegistryOptionsConfig> builder()
{
@@ -36,9 +38,11 @@ public static <T> SchemaRegistryOptionsConfigBuilder<T> builder(
public SchemaRegistryOptionsConfig(
String url,
- String context)
+ String context,
+ Duration maxAge)
{
this.url = url;
this.context = context;
+ this.maxAge = maxAge;
}
}
diff --git a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapter.java b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapter.java
index 7b8c01552b..fe98b95780 100644
--- a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapter.java
+++ b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapter.java
@@ -14,6 +14,8 @@
*/
package io.aklivity.zilla.runtime.catalog.schema.registry.internal.config;
+import java.time.Duration;
+
import jakarta.json.Json;
import jakarta.json.JsonObject;
import jakarta.json.JsonObjectBuilder;
@@ -26,6 +28,7 @@ public class SchemaRegistryOptionsConfigAdapter implements OptionsConfigAdapterS
{
private static final String URL = "url";
private static final String CONTEXT = "context";
+ private static final String MAX_AGE_NAME = "max-age";
@Override
public Kind kind()
{
@@ -58,6 +61,12 @@ public JsonObject adaptToJson(
catalog.add(CONTEXT, config.context);
}
+ Duration maxAge = config.maxAge;
+ if (maxAge != null)
+ {
+ catalog.add(MAX_AGE_NAME, maxAge.toSeconds());
+ }
+
return catalog.build();
}
@@ -78,6 +87,11 @@ public OptionsConfig adaptFromJson(
{
options.context(object.getString(CONTEXT));
}
+
+ if (object.containsKey(MAX_AGE_NAME))
+ {
+ options.maxAge(Duration.ofSeconds(object.getJsonNumber(MAX_AGE_NAME).longValue()));
+ }
}
return options.build();
diff --git a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigBuilder.java b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigBuilder.java
index 8e05c4049b..eb06664c25 100644
--- a/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigBuilder.java
+++ b/incubator/catalog-schema-registry/src/main/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigBuilder.java
@@ -14,6 +14,7 @@
*/
package io.aklivity.zilla.runtime.catalog.schema.registry.internal.config;
+import java.time.Duration;
import java.util.function.Function;
import io.aklivity.zilla.runtime.engine.config.ConfigBuilder;
@@ -21,10 +22,13 @@ public final class SchemaRegistryOptionsConfigBuilder<T> extends ConfigBuilder<T, SchemaRegistryOptionsConfigBuilder<T>>
{
+ private static final Duration MAX_AGE_DEFAULT = Duration.ofSeconds(300);
+
private final Function<OptionsConfig, T> mapper;
private String url;
private String context;
+ private Duration maxAge;
SchemaRegistryOptionsConfigBuilder(
Function<OptionsConfig, T> mapper)
@@ -53,9 +57,17 @@ public SchemaRegistryOptionsConfigBuilder<T> context(
return this;
}
+ public SchemaRegistryOptionsConfigBuilder<T> maxAge(
+ Duration maxAge)
+ {
+ this.maxAge = maxAge;
+ return this;
+ }
+
@Override
public T build()
{
- return mapper.apply(new SchemaRegistryOptionsConfig(url, context));
+ Duration maxAge = (this.maxAge != null) ? this.maxAge : MAX_AGE_DEFAULT;
+ return mapper.apply(new SchemaRegistryOptionsConfig(url, context, maxAge));
}
}
diff --git a/incubator/catalog-schema-registry/src/main/zilla/internal.idl b/incubator/catalog-schema-registry/src/main/zilla/internal.idl
new file mode 100644
index 0000000000..8a57eb7c2b
--- /dev/null
+++ b/incubator/catalog-schema-registry/src/main/zilla/internal.idl
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */ +scope internal +{ + option byteorder network; + + struct SchemaRegistryPrefix + { + uint8 magic = 0; + int32 schemaId; + } +} diff --git a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogFactoryTest.java b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogFactoryTest.java index 91bea45a61..ab68d6bf65 100644 --- a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogFactoryTest.java +++ b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryCatalogFactoryTest.java @@ -19,6 +19,8 @@ import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; +import java.time.Duration; + import org.junit.Test; import io.aklivity.zilla.runtime.catalog.schema.registry.internal.config.SchemaRegistryOptionsConfig; @@ -45,10 +47,14 @@ public void shouldLoadAndCreate() CatalogContext context = catalog.supply(mock(EngineContext.class)); assertThat(context, instanceOf(SchemaRegistryCatalogContext.class)); - SchemaRegistryOptionsConfig catalogConfig = - new SchemaRegistryOptionsConfig("http://localhost:8081", "default"); + SchemaRegistryOptionsConfig catalogConfig = SchemaRegistryOptionsConfig.builder() + .url("http://localhost:8081") + .context("default") + .maxAge(Duration.ofSeconds(100)) + .build(); CatalogConfig options = new CatalogConfig("test", "catalog0", "schema-registry", catalogConfig); CatalogHandler handler = context.attach(options); + assertThat(handler, instanceOf(SchemaRegistryCatalogHandler.class)); } } diff --git a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryIT.java b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryIT.java index c115321a4c..f65af539c1 100644 --- a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryIT.java +++ b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/SchemaRegistryIT.java @@ -21,6 +21,10 @@ import static org.junit.Assert.assertEquals; import static org.junit.rules.RuleChain.outerRule; +import java.time.Duration; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -31,6 +35,8 @@ import org.kaazing.k3po.junit.rules.K3poRule; import io.aklivity.zilla.runtime.catalog.schema.registry.internal.config.SchemaRegistryOptionsConfig; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public class SchemaRegistryIT { @@ -47,7 +53,11 @@ public class SchemaRegistryIT @Before public void setup() { - config = new SchemaRegistryOptionsConfig("http://localhost:8081", "default"); + config = SchemaRegistryOptionsConfig.builder() + .url("http://localhost:8081") + .context("default") + .maxAge(Duration.ofSeconds(1)) + .build(); } @Test @@ -153,4 +163,60 @@ public void shouldResolveSchemaViaSubjectVersionFromCache() throws Exception assertThat(schema, not(nullValue())); assertEquals(expected, schema); } + + @Test + public void shouldVerifyMaxPadding() + { + SchemaRegistryCatalogHandler catalog = new 
SchemaRegistryCatalogHandler(config); + + assertEquals(5, catalog.encodePadding()); + } + + @Test + public void shouldVerifyEncodedData() + { + SchemaRegistryCatalogHandler catalog = new SchemaRegistryCatalogHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + assertEquals(18, catalog.encode(1, data, 0, data.capacity(), + ValueConsumer.NOP, CatalogHandler.Encoder.IDENTITY)); + } + + @Test + public void shouldResolveSchemaIdAndProcessData() + { + + SchemaRegistryCatalogHandler catalog = new SchemaRegistryCatalogHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + int valLength = catalog.decode(data, 0, data.capacity(), ValueConsumer.NOP, CatalogHandler.Decoder.IDENTITY); + + assertEquals(data.capacity() - 5, valLength); + } + + @Test + public void shouldResolveSchemaIdFromData() + { + SchemaRegistryCatalogHandler catalog = new SchemaRegistryCatalogHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + int schemaId = catalog.resolve(data, 0, data.capacity()); + + assertEquals(9, schemaId); + } } diff --git a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapterTest.java b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapterTest.java index add863f2d5..0c957a87e2 100644 --- a/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapterTest.java +++ b/incubator/catalog-schema-registry/src/test/java/io/aklivity/zilla/runtime/catalog/schema/registry/internal/config/SchemaRegistryOptionsConfigAdapterTest.java @@ -19,6 +19,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import java.time.Duration; + import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; import jakarta.json.bind.JsonbConfig; @@ -45,23 +47,28 @@ public void shouldReadCondition() "{" + "\"url\": \"http://localhost:8081\"," + "\"context\": \"default\"," + - "}"; + "}"; SchemaRegistryOptionsConfig catalog = jsonb.fromJson(text, SchemaRegistryOptionsConfig.class); assertThat(catalog, not(nullValue())); assertThat(catalog.url, equalTo("http://localhost:8081")); assertThat(catalog.context, equalTo("default")); + assertThat(catalog.maxAge.toSeconds(), equalTo(300L)); } @Test public void shouldWriteCondition() { - SchemaRegistryOptionsConfig catalog = new SchemaRegistryOptionsConfig("http://localhost:8081", "default"); + SchemaRegistryOptionsConfig catalog = SchemaRegistryOptionsConfig.builder() + .url("http://localhost:8081") + .context("default") + .maxAge(Duration.ofSeconds(300)) + .build(); String text = jsonb.toJson(catalog); assertThat(text, not(nullValue())); - assertThat(text, equalTo("{\"url\":\"http://localhost:8081\",\"context\":\"default\"}")); + assertThat(text, equalTo("{\"url\":\"http://localhost:8081\",\"context\":\"default\",\"max-age\":300}")); } } diff --git a/incubator/command-dump/pom.xml 
b/incubator/command-dump/pom.xml index 23c3d4c19e..53be863e0c 100644 --- a/incubator/command-dump/pom.xml +++ b/incubator/command-dump/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua b/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua index 355b14a8f0..9cf59d0b9f 100644 --- a/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua +++ b/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua @@ -142,6 +142,7 @@ local kafka_ext_apis = { [253] = "GROUP", [254] = "BOOTSTRAP", [255] = "MERGED", + [22] = "INIT_PRODUCER_ID", [3] = "META", [8] = "OFFSET_COMMIT", [9] = "OFFSET_FETCH", @@ -432,7 +433,8 @@ local fields = { mqtt_ext_topic_length = ProtoField.int16("zilla.mqtt_ext.topic_length", "Length", base.DEC), mqtt_ext_topic = ProtoField.string("zilla.mqtt_ext.topic", "Topic", base.NONE), mqtt_ext_expiry = ProtoField.int32("zilla.mqtt_ext.expiry", "Expiry", base.DEC), - mqtt_ext_qos_max = ProtoField.uint16("zilla.mqtt_ext.qos_max", "QoS Maximum", base.DEC), + mqtt_ext_subscribe_qos_max = ProtoField.uint16("zilla.mqtt_ext.subscribe_qos_max", "Subscribe QoS Maximum", base.DEC), + mqtt_ext_publish_qos_max = ProtoField.uint16("zilla.mqtt_ext.publish_qos_max", "Publish QoS Maximum", base.DEC), mqtt_ext_packet_size_max = ProtoField.uint32("zilla.mqtt_ext.packet_size_max", "Packet Size Maximum", base.DEC), -- capabilities mqtt_ext_capabilities = ProtoField.uint8("zilla.mqtt_ext.capabilities", "Capabilities", base.HEX), @@ -584,6 +586,8 @@ local fields = { kafka_ext_ancestor_offset = ProtoField.int64("zilla.kafka_ext.ancestor_offset", "Ancestor Offset", base.DEC), kafka_ext_headers_array_length = ProtoField.int8("zilla.kafka_ext.headers_array_length", "Length", base.DEC), kafka_ext_headers_array_size = ProtoField.int8("zilla.kafka_ext.headers_array_size", "Size", base.DEC), + kafka_ext_producer_id = ProtoField.uint64("zilla.kafka_ext.producer_id", "Producer ID", base.HEX), + kafka_ext_producer_epoch = ProtoField.uint16("zilla.kafka_ext.producer_epoch", "Producer Epoch", base.HEX), -- meta kafka_ext_partition_leader_id = ProtoField.int32("zilla.kafka_ext.partition_leader_id", "Leader ID", base.DEC), -- offset_fetch @@ -600,7 +604,6 @@ local fields = { kafka_ext_config = ProtoField.string("zilla.kafka_ext.config", "Config", base.NONE), -- fetch kafka_ext_header_size_max = ProtoField.int32("zilla.kafka_ext.header_size_max", "Header Size Maximum", base.DEC), - kafka_ext_producer_id = ProtoField.uint64("zilla.kafka_ext.producer_id", "Producer ID", base.HEX), kafka_ext_transactions_array_length = ProtoField.int8("zilla.kafka_ext.transactions_array_length", "Length", base.DEC), kafka_ext_transactions_array_size = ProtoField.int8("zilla.kafka_ext.transactions_array_size", "Size", base.DEC), kafka_ext_transaction_result = ProtoField.int8("zilla.kafka_ext.transaction_result", "Result", base.DEC, @@ -1611,8 +1614,12 @@ function handle_mqtt_extension(buffer, offset, ext_subtree, frame_type_id) elseif kind == "SESSION" then handle_mqtt_data_session_extension(buffer, offset + kind_length, ext_subtree) end - elseif frame_type_id == FLUSH_ID and kind == "SUBSCRIBE" then - handle_mqtt_flush_subscribe_extension(buffer, offset + kind_length, ext_subtree) + elseif frame_type_id == FLUSH_ID then + if kind == 
"SUBSCRIBE" then + handle_mqtt_flush_subscribe_extension(buffer, offset + kind_length, ext_subtree) + elseif kind == "SESSION" then + handle_mqtt_flush_session_extension(buffer, offset + kind_length, ext_subtree) + end end elseif frame_type_id == RESET_ID then handle_mqtt_reset_extension(buffer, offset, ext_subtree) @@ -1718,13 +1725,18 @@ function handle_mqtt_begin_session_extension(buffer, offset, ext_subtree) local expiry_length = 4 local slice_expiry = buffer(expiry_offset, expiry_length) ext_subtree:add_le(fields.mqtt_ext_expiry, slice_expiry) - -- qos_max - local qos_max_offset = expiry_offset + expiry_length - local qos_max_length = 2 - local slice_qos_max = buffer(qos_max_offset, qos_max_length) - ext_subtree:add_le(fields.mqtt_ext_qos_max, slice_qos_max) + -- subscribe_qos_max + local subscribe_qos_max_offset = expiry_offset + expiry_length + local subscribe_qos_max_length = 2 + local slice_subscribe_qos_max = buffer(subscribe_qos_max_offset, subscribe_qos_max_length) + ext_subtree:add_le(fields.mqtt_ext_subscribe_qos_max, slice_subscribe_qos_max) + -- publish_qos_max + local publish_qos_max_offset = subscribe_qos_max_offset + subscribe_qos_max_length + local publish_qos_max_length = 2 + local slice_publish_qos_max = buffer(publish_qos_max_offset, publish_qos_max_length) + ext_subtree:add_le(fields.mqtt_ext_publish_qos_max, slice_publish_qos_max) -- packet_size_max - local packet_size_max_offset = qos_max_offset + qos_max_length + local packet_size_max_offset = publish_qos_max_offset + publish_qos_max_length local packet_size_max_length = 4 local slice_packet_size_max = buffer(packet_size_max_offset, packet_size_max_length) ext_subtree:add_le(fields.mqtt_ext_packet_size_max, slice_packet_size_max) @@ -1763,8 +1775,13 @@ function handle_mqtt_data_publish_extension(buffer, offset, ext_subtree) local flags_label = string.format("Flags: 0x%02x", slice_flags:le_uint()) local flags_subtree = ext_subtree:add(zilla_protocol, slice_flags, flags_label) flags_subtree:add_le(fields.mqtt_ext_publish_flags_retain, slice_flags) + -- packet_id + local packet_id_offset = flags_offset + flags_length + local packet_id_length = 2 + local slice_packet_id = buffer(packet_id_offset, packet_id_length) + ext_subtree:add_le(fields.mqtt_ext_packet_id, slice_packet_id) -- expiry_interval - local expiry_interval_offset = flags_offset + flags_length + local expiry_interval_offset = packet_id_offset + packet_id_length local expiry_interval_length = 4 local slice_expiry_interval = buffer(expiry_interval_offset, expiry_interval_length) ext_subtree:add_le(fields.mqtt_ext_expiry_interval, slice_expiry_interval) @@ -1943,6 +1960,14 @@ function handle_mqtt_flush_subscribe_extension(buffer, offset, ext_subtree) dissect_and_add_mqtt_topic_filters(buffer, topic_filters_offset, ext_subtree) end +function handle_mqtt_flush_session_extension(buffer, offset, ext_subtree) + -- packet_id + local packet_id_offset = offset + local packet_id_length = 2 + local slice_packet_id = buffer(packet_id_offset, packet_id_length) + ext_subtree:add_le(fields.mqtt_ext_packet_id, slice_packet_id) +end + function handle_mqtt_reset_extension(buffer, offset, ext_subtree) -- server_ref local server_ref_offset = offset @@ -1976,6 +2001,8 @@ function handle_kafka_extension(buffer, offset, ext_subtree, frame_type_id) handle_kafka_begin_bootstrap_extension(buffer, offset + api_length, ext_subtree) elseif api == "MERGED" then handle_kafka_begin_merged_extension(buffer, offset + api_length, ext_subtree) + elseif api == "INIT_PRODUCER_ID" then + 
handle_kafka_begin_init_producer_id_extension(buffer, offset + api_length, ext_subtree) elseif api == "META" then handle_kafka_begin_meta_extension(buffer, offset + api_length, ext_subtree) elseif api == "OFFSET_COMMIT" then @@ -2366,6 +2393,19 @@ function handle_kafka_begin_merged_extension(buffer, offset, ext_subtree) ext_subtree:add(fields.kafka_ext_ack_mode, ack_mode) end +function handle_kafka_begin_init_producer_id_extension(buffer, offset, ext_subtree) + -- producer_id + local producer_id_offset = offset + local producer_id_length = 8 + local slice_producer_id = buffer(producer_id_offset, producer_id_length) + ext_subtree:add_le(fields.kafka_ext_producer_id, slice_producer_id) + -- producer_epoch + local producer_epoch_offset = producer_id_offset + producer_id_length + local producer_epoch_length = 2 + local slice_producer_epoch = buffer(producer_epoch_offset, producer_epoch_length) + ext_subtree:add_le(fields.kafka_ext_producer_epoch, slice_producer_epoch) +end + function dissect_and_add_kafka_filters_array(buffer, offset, tree, field_array_length, field_array_size) local length, array_size = dissect_and_add_array_header_as_subtree(buffer, offset, tree, "Filters (%d items)", field_array_length, field_array_size) @@ -2791,8 +2831,18 @@ function handle_kafka_data_merged_produce_extension(buffer, offset, ext_subtree) local timestamp_length = 8 local slice_timestamp = buffer(timestamp_offset, timestamp_length) ext_subtree:add_le(fields.sse_ext_timestamp, slice_timestamp) + -- producer_id + local producer_id_offset = timestamp_offset + timestamp_length + local producer_id_length = 8 + local slice_producer_id = buffer(producer_id_offset, producer_id_length) + ext_subtree:add_le(fields.kafka_ext_producer_id, slice_producer_id) + -- producer_epoch + local producer_epoch_offset = producer_id_offset + producer_id_length + local producer_epoch_length = 2 + local slice_producer_epoch = buffer(producer_epoch_offset, producer_epoch_length) + ext_subtree:add_le(fields.kafka_ext_producer_epoch, slice_producer_epoch) -- partition - local partition_offset = timestamp_offset + timestamp_length + local partition_offset = producer_epoch_offset + producer_epoch_length local partition_length = resolve_length_of_kafka_offset(buffer, partition_offset) dissect_and_add_kafka_offset(buffer, partition_offset, ext_subtree, "Partition: %d [%d]") -- key @@ -2909,13 +2959,8 @@ function dissect_and_add_kafka_partition_array(buffer, offset, tree, field_array end function handle_kafka_begin_offset_commit_extension(buffer, offset, ext_subtree) - -- topic - local topic_offset = offset - local topic_length, slice_topic_length, slice_topic_text = dissect_length_value(buffer, topic_offset, 2) - add_string_as_subtree(buffer(topic_offset, topic_length), ext_subtree, "Topic: %s", - slice_topic_length, slice_topic_text, fields.mqtt_ext_topic_length, fields.mqtt_ext_topic) -- group_id - local group_id_offset = topic_offset + topic_length + local group_id_offset = offset local group_id_length, slice_group_id_length, slice_group_id_text = dissect_length_value(buffer, group_id_offset, 2) add_string_as_subtree(buffer(group_id_offset, group_id_length), ext_subtree, "Group ID: %s", slice_group_id_length, slice_group_id_text, fields.kafka_ext_group_id_length, fields.kafka_ext_group_id) @@ -2929,11 +2974,26 @@ function handle_kafka_begin_offset_commit_extension(buffer, offset, ext_subtree) local instance_id_length, slice_instance_id_length, slice_instance_id_text = dissect_length_value(buffer, instance_id_offset, 2) 
add_string_as_subtree(buffer(instance_id_offset, instance_id_length), ext_subtree, "Instance ID: %s",
slice_instance_id_length, slice_instance_id_text, fields.kafka_ext_instance_id_length, fields.kafka_ext_instance_id)
+ -- host
+ local host_offset = instance_id_offset + instance_id_length
+ local host_length, slice_host_length, slice_host_text = dissect_length_value(buffer, host_offset, 2)
+ add_string_as_subtree(buffer(host_offset, host_length), ext_subtree, "Host: %s",
+ slice_host_length, slice_host_text, fields.kafka_ext_host_length, fields.kafka_ext_host)
+ -- port
+ local port_offset = host_offset + host_length
+ local port_length = 4
+ local slice_port = buffer(port_offset, port_length)
+ ext_subtree:add_le(fields.kafka_ext_port, slice_port)
end

function handle_kafka_data_offset_commit_extension(buffer, offset, ext_subtree)
+ -- topic
+ local topic_length, slice_topic_length, slice_topic_text = dissect_length_value(buffer, topic_offset, 2)
+ local topic_offset = offset
+ add_string_as_subtree(buffer(topic_offset, topic_length), ext_subtree, "Topic: %s",
+ slice_topic_length, slice_topic_text, fields.mqtt_ext_topic_length, fields.mqtt_ext_topic)
-- progress
- local progress_offset = offset
+ local progress_offset = topic_offset + topic_length
local progress_length = resolve_length_of_kafka_offset(buffer, progress_offset)
dissect_and_add_kafka_offset(buffer, progress_offset, ext_subtree, "Progress: %d [%d]")
-- generation_id
@@ -3285,13 +3345,8 @@ function handle_kafka_begin_produce_extension(buffer, offset, ext_subtree)
local transaction_length, slice_transaction_length, slice_transaction_text = dissect_length_value(buffer, transaction_offset, 1)
add_string_as_subtree(buffer(transaction_offset, transaction_length), ext_subtree, "Transaction: %s",
slice_transaction_length, slice_transaction_text, fields.kafka_ext_transaction_length, fields.kafka_ext_transaction)
- -- producer_id
- local producer_id_offset = transaction_offset + transaction_length
- local producer_id_length = 8
- local slice_producer_id = buffer(producer_id_offset, producer_id_length)
- ext_subtree:add_le(fields.kafka_ext_producer_id, slice_producer_id)
-- topic
- local topic_offset = producer_id_offset + producer_id_length
+ local topic_offset = transaction_offset + transaction_length
local topic_length, slice_topic_length, slice_topic_text = dissect_length_value(buffer, topic_offset, 2)
add_string_as_subtree(buffer(topic_offset, topic_length), ext_subtree, "Topic: %s",
slice_topic_length, slice_topic_text, fields.mqtt_ext_topic_length, fields.mqtt_ext_topic)
@@ -3312,8 +3367,18 @@ function handle_kafka_data_produce_extension(buffer, offset, ext_subtree)
local timestamp_length = 8
local slice_timestamp = buffer(timestamp_offset, timestamp_length)
ext_subtree:add_le(fields.sse_ext_timestamp, slice_timestamp)
+ -- producer_id
+ local producer_id_offset = timestamp_offset + timestamp_length
+ local producer_id_length = 8
+ local slice_producer_id = buffer(producer_id_offset, producer_id_length)
+ ext_subtree:add_le(fields.kafka_ext_producer_id, slice_producer_id)
+ -- producer_epoch
+ local producer_epoch_offset = producer_id_offset + producer_id_length
+ local producer_epoch_length = 2
+ local slice_producer_epoch = buffer(producer_epoch_offset, producer_epoch_length)
+ ext_subtree:add_le(fields.kafka_ext_producer_epoch, slice_producer_epoch)
-- sequence
- local sequence_offset = timestamp_offset + timestamp_length
+ local sequence_offset = producer_epoch_offset + producer_epoch_length
local sequence_length = 4
local
slice_sequence = buffer(sequence_offset, sequence_length) ext_subtree:add_le(fields.kafka_ext_sequence, slice_sequence) diff --git a/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java index 483c3a3c06..faa4f8abf9 100644 --- a/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java +++ b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java @@ -1272,6 +1272,7 @@ public void generateStreamsBuffer() throws Exception .publish() .qos("EXACTLY_ONCE") .flags("RETAIN") + .packetId(0x42) .expiryInterval(77) .contentType("Content Type") .format("BINARY") @@ -1493,7 +1494,8 @@ public void generateStreamsBuffer() throws Exception .session() .flags("CLEAN_START") .expiry(42) - .qosMax(2) + .subscribeQosMax(2) + .publishQosMax(1) .packetSizeMax(42_000) .capabilities("RETAIN") .clientId("client-id") @@ -1518,7 +1520,8 @@ public void generateStreamsBuffer() throws Exception .session() .flags("CLEAN_START", "WILL") .expiry(42) - .qosMax(2) + .subscribeQosMax(1) + .publishQosMax(2) .packetSizeMax(42_000) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-id") @@ -1585,6 +1588,27 @@ public void generateStreamsBuffer() throws Exception .build(); streams[0].write(DataFW.TYPE_ID, data23.buffer(), 0, data23.sizeof()); + DirectBuffer mqttSessionFlushEx1 = new UnsafeBuffer(MqttFunctions.flushEx() + .typeId(MQTT_TYPE_ID) + .session() + .packetId(0x2142) + .build() + .build()); + FlushFW flush5 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + .originId(0x0000000900000022L) // north_mqtt_server + .routedId(0x0000000900000023L) // north_mqtt_kafka_mapping + .streamId(0x0000000000000025L) // INI + .sequence(401) + .acknowledge(402) + .maximum(7777) + .timestamp(0x0000000000000143L) + .traceId(0x0000000000000025L) + .budgetId(0x0000000000000000L) + .reserved(0x00000000) + .extension(mqttSessionFlushEx1, 0, mqttSessionFlushEx1.capacity()) + .build(); + streams[0].write(FlushFW.TYPE_ID, flush5.buffer(), 0, flush5.sizeof()); + // kafka extension // - CONSUMER DirectBuffer kafkaConsumerBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() @@ -1680,7 +1704,7 @@ public void generateStreamsBuffer() throws Exception .correlationId(77) .build() .build()); - FlushFW flush5 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush6 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000027L) // INI @@ -1693,7 +1717,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaConsumerFlushEx1, 0, kafkaConsumerFlushEx1.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush5.buffer(), 0, flush5.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush6.buffer(), 0, flush6.sizeof()); DirectBuffer kafkaResetEx1 = new UnsafeBuffer(KafkaFunctions.resetEx() .typeId(KAFKA_TYPE_ID) @@ -1773,7 +1797,7 @@ public void generateStreamsBuffer() throws Exception .memberId("member-id") .build() .build()); - FlushFW flush6 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush7 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // 
north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000029L) // INI @@ -1786,7 +1810,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaGroupFlushEx1, 0, kafkaGroupFlushEx1.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush6.buffer(), 0, flush6.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush7.buffer(), 0, flush7.sizeof()); DirectBuffer kafkaGroupFlushEx2 = new UnsafeBuffer(KafkaFunctions.flushEx() .typeId(KAFKA_TYPE_ID) @@ -1799,7 +1823,7 @@ public void generateStreamsBuffer() throws Exception .members("member-3") .build() .build()); - FlushFW flush7 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush8 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000028L) // REP @@ -1812,7 +1836,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaGroupFlushEx2, 0, kafkaGroupFlushEx2.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush7.buffer(), 0, flush7.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush8.buffer(), 0, flush8.sizeof()); // - BOOTSTRAP DirectBuffer kafkaBootstrapBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() @@ -2029,6 +2053,8 @@ public void generateStreamsBuffer() throws Exception .produce() .deferred(100) .timestamp(0x53) + .producerId(0x77L) + .producerEpoch((short) 0x42) .partition(1, 77_000) .key("key") .hashKey("hash-key") @@ -2060,7 +2086,7 @@ public void generateStreamsBuffer() throws Exception .correlationId(77) .build() .build()); - FlushFW flush8 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush9 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000033L) // INI @@ -2073,7 +2099,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaMergedConsumerFlushEx, 0, kafkaMergedConsumerFlushEx.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush8.buffer(), 0, flush8.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush9.buffer(), 0, flush9.sizeof()); DirectBuffer kafkaMergedFetchFlushEx = new UnsafeBuffer(KafkaFunctions.flushEx() .typeId(KAFKA_TYPE_ID) @@ -2090,7 +2116,7 @@ public void generateStreamsBuffer() throws Exception .key("key") .build() .build()); - FlushFW flush9 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush10 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000033L) // INI @@ -2103,16 +2129,59 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaMergedFetchFlushEx, 0, kafkaMergedFetchFlushEx.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush9.buffer(), 0, flush9.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush10.buffer(), 0, flush10.sizeof()); + + // - INIT_PRODUCER_ID + DirectBuffer kafkaInitProducerIdBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() + .typeId(KAFKA_TYPE_ID) + .initProducerId() + .producerId(0x77L) + .producerEpoch((short) 0x42) + .build() + .build()); + BeginFW begin34 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + .originId(0x000000090000000fL) // 
north_kafka_cache_client + .routedId(0x0000000900000010L) // south_kafka_cache_server + .streamId(0x0000000000000133L) // INI + .sequence(0) + .acknowledge(0) + .maximum(0) + .timestamp(0x0000000000000056L) + .traceId(0x0000000000000035L) + .affinity(0x0000000000000000L) + .extension(kafkaInitProducerIdBeginEx1, 0, kafkaInitProducerIdBeginEx1.capacity()) + .build(); + streams[0].write(BeginFW.TYPE_ID, begin34.buffer(), 0, begin34.sizeof()); + + DirectBuffer kafkaInitProducerIdBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() + .typeId(KAFKA_TYPE_ID) + .initProducerId() + .producerId(0x88L) + .producerEpoch((short) 0x21) + .build() + .build()); + BeginFW begin35 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + .originId(0x000000090000000fL) // north_kafka_cache_client + .routedId(0x0000000900000010L) // south_kafka_cache_server + .streamId(0x0000000000000132L) // REP + .sequence(0) + .acknowledge(0) + .maximum(0) + .timestamp(0x0000000000000057L) + .traceId(0x0000000000000035L) + .affinity(0x0000000000000000L) + .extension(kafkaInitProducerIdBeginEx2, 0, kafkaInitProducerIdBeginEx2.capacity()) + .build(); + streams[0].write(BeginFW.TYPE_ID, begin35.buffer(), 0, begin35.sizeof()); // - META - DirectBuffer kafkaMetaBegin1 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaMetaBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .meta() .topic("topic") .build() .build()); - BeginFW begin34 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin36 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000035L) // INI @@ -2122,17 +2191,17 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000056L) .traceId(0x0000000000000035L) .affinity(0x0000000000000000L) - .extension(kafkaMetaBegin1, 0, kafkaMetaBegin1.capacity()) + .extension(kafkaMetaBeginEx1, 0, kafkaMetaBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin34.buffer(), 0, begin34.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin36.buffer(), 0, begin36.sizeof()); - DirectBuffer kafkaMetaBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaMetaBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .meta() .topic("topic") .build() .build()); - BeginFW begin35 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin37 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000034L) // REP @@ -2142,9 +2211,9 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000057L) .traceId(0x0000000000000035L) .affinity(0x0000000000000000L) - .extension(kafkaMetaBegin2, 0, kafkaMetaBegin2.capacity()) + .extension(kafkaMetaBeginEx2, 0, kafkaMetaBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin35.buffer(), 0, begin35.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin37.buffer(), 0, begin37.sizeof()); DirectBuffer kafkaMetaDataPayload = new String8FW("kafka meta data payload").value(); DirectBuffer kafkaMetaDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() @@ -2172,16 +2241,17 @@ public void generateStreamsBuffer() throws Exception streams[0].write(DataFW.TYPE_ID, data27.buffer(), 0, data27.sizeof()); // - OFFSET_COMMIT - DirectBuffer kafkaOffsetCommitBegin1 = new 
UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaOffsetCommitBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .offsetCommit() - .topic("topic") .groupId("group") .memberId("member") .instanceId("instance") + .host("host") + .port(4242) .build() .build()); - BeginFW begin36 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin38 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000037L) // INI @@ -2191,20 +2261,21 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000059L) .traceId(0x0000000000000037L) .affinity(0x0000000000000000L) - .extension(kafkaOffsetCommitBegin1, 0, kafkaOffsetCommitBegin1.capacity()) + .extension(kafkaOffsetCommitBeginEx1, 0, kafkaOffsetCommitBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin36.buffer(), 0, begin36.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin38.buffer(), 0, begin38.sizeof()); - DirectBuffer kafkaOffsetCommitBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaOffsetCommitBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .offsetCommit() - .topic("topic") .groupId("group") .memberId("member") .instanceId("instance") + .host("host") + .port(4242) .build() .build()); - BeginFW begin37 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin39 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000036L) // REP @@ -2214,14 +2285,15 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x000000000000005aL) .traceId(0x0000000000000037L) .affinity(0x0000000000000000L) - .extension(kafkaOffsetCommitBegin2, 0, kafkaOffsetCommitBegin2.capacity()) + .extension(kafkaOffsetCommitBeginEx2, 0, kafkaOffsetCommitBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin37.buffer(), 0, begin37.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin39.buffer(), 0, begin39.sizeof()); DirectBuffer kafkaOffsetCommitDataPayload = new String8FW("kafka offset commit data payload").value(); DirectBuffer kafkaOffsetCommitDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() .typeId(KAFKA_TYPE_ID) .offsetCommit() + .topic("test") .progress(21, 1234, "metadata") .generationId(42) .leaderEpoch(77) @@ -2244,7 +2316,7 @@ public void generateStreamsBuffer() throws Exception streams[0].write(DataFW.TYPE_ID, data28.buffer(), 0, data28.sizeof()); // - OFFSET_FETCH - DirectBuffer kafkaOffsetFetchBegin1 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaOffsetFetchBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .offsetFetch() .groupId("group") @@ -2257,7 +2329,7 @@ public void generateStreamsBuffer() throws Exception .partition(88) .build() .build()); - BeginFW begin38 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin40 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000039L) // INI @@ -2267,11 +2339,11 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x000000000000005cL) .traceId(0x0000000000000039L) .affinity(0x0000000000000000L) - .extension(kafkaOffsetFetchBegin1, 0, 
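
Aside: the OFFSET_COMMIT hunks just above change the shape of the begin extension — the topic no longer belongs to beginEx (it now travels on each dataEx), while host and port identify the group coordinator. Restating the new pairing compactly, using only calls that appear in these hunks:

    // Begin: group/member identity plus coordinator host:port (no topic).
    DirectBuffer offsetCommitBeginEx = new UnsafeBuffer(KafkaFunctions.beginEx()
        .typeId(KAFKA_TYPE_ID)
        .offsetCommit()
            .groupId("group")
            .memberId("member")
            .instanceId("instance")
            .host("host")
            .port(4242)
            .build()
        .build());

    // Data: the committed progress now names its topic per message.
    DirectBuffer offsetCommitDataEx = new UnsafeBuffer(KafkaFunctions.dataEx()
        .typeId(KAFKA_TYPE_ID)
        .offsetCommit()
            .topic("test")
            .progress(21, 1234, "metadata")
            .generationId(42)
            .leaderEpoch(77)
            .build()
        .build());
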
kafkaOffsetFetchBegin1.capacity()) + .extension(kafkaOffsetFetchBeginEx1, 0, kafkaOffsetFetchBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin38.buffer(), 0, begin38.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin40.buffer(), 0, begin40.sizeof()); - DirectBuffer kafkaOffsetFetchBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaOffsetFetchBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .offsetFetch() .groupId("group") @@ -2281,7 +2353,7 @@ public void generateStreamsBuffer() throws Exception .partition(42) .build() .build()); - BeginFW begin39 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin41 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x0000000000000038L) // REP @@ -2291,9 +2363,9 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x000000000000005dL) .traceId(0x0000000000000039L) .affinity(0x0000000000000000L) - .extension(kafkaOffsetFetchBegin2, 0, kafkaOffsetFetchBegin2.capacity()) + .extension(kafkaOffsetFetchBeginEx2, 0, kafkaOffsetFetchBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin39.buffer(), 0, begin39.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin41.buffer(), 0, begin41.sizeof()); DirectBuffer kafkaOffsetFetchDataPayload = new String8FW("kafka offset fetch data payload").value(); DirectBuffer kafkaOffsetFetchDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() @@ -2321,7 +2393,7 @@ public void generateStreamsBuffer() throws Exception streams[0].write(DataFW.TYPE_ID, data29.buffer(), 0, data29.sizeof()); // - DESCRIBE - DirectBuffer kafkaDescribeBegin1 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaDescribeBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .describe() .topic("topic") @@ -2330,7 +2402,7 @@ public void generateStreamsBuffer() throws Exception .config("config3") .build() .build()); - BeginFW begin40 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin42 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003bL) // INI @@ -2340,18 +2412,18 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x000000000000005fL) .traceId(0x000000000000003bL) .affinity(0x0000000000000000L) - .extension(kafkaDescribeBegin1, 0, kafkaDescribeBegin1.capacity()) + .extension(kafkaDescribeBeginEx1, 0, kafkaDescribeBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin40.buffer(), 0, begin40.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin42.buffer(), 0, begin42.sizeof()); - DirectBuffer kafkaDescribeBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaDescribeBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .describe() .topic("topic") // configs omitted .build() .build()); - BeginFW begin41 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin43 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003aL) // REP @@ -2361,9 +2433,9 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000060L) .traceId(0x000000000000003bL) 
.affinity(0x0000000000000000L) - .extension(kafkaDescribeBegin2, 0, kafkaDescribeBegin2.capacity()) + .extension(kafkaDescribeBeginEx2, 0, kafkaDescribeBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin41.buffer(), 0, begin41.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin43.buffer(), 0, begin43.sizeof()); DirectBuffer kafkaDescribeDataPayload = new String8FW("kafka describe payload").value(); DirectBuffer kafkaDescribeDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() @@ -2391,7 +2463,7 @@ public void generateStreamsBuffer() throws Exception streams[0].write(DataFW.TYPE_ID, data30.buffer(), 0, data30.sizeof()); // - FETCH - DirectBuffer kafkaFetchBegin1 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaFetchBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .fetch() .topic("topic") @@ -2410,7 +2482,7 @@ public void generateStreamsBuffer() throws Exception .deltaType("NONE") .build() .build()); - BeginFW begin42 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin44 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003dL) // INI @@ -2420,11 +2492,11 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000062L) .traceId(0x000000000000003dL) .affinity(0x0000000000000000L) - .extension(kafkaFetchBegin1, 0, kafkaFetchBegin1.capacity()) + .extension(kafkaFetchBeginEx1, 0, kafkaFetchBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin42.buffer(), 0, begin42.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin44.buffer(), 0, begin44.sizeof()); - DirectBuffer kafkaFetchBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaFetchBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .fetch() .topic("topic") @@ -2437,7 +2509,7 @@ public void generateStreamsBuffer() throws Exception .deltaType("JSON_PATCH") .build() .build()); - BeginFW begin43 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin45 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003cL) // REP @@ -2447,9 +2519,9 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000063L) .traceId(0x000000000000003dL) .affinity(0x0000000000000000L) - .extension(kafkaFetchBegin2, 0, kafkaFetchBegin2.capacity()) + .extension(kafkaFetchBeginEx2, 0, kafkaFetchBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin43.buffer(), 0, begin43.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin45.buffer(), 0, begin45.sizeof()); DirectBuffer kafkaFetchDataPayload = new String8FW("kafka fetch payload").value(); DirectBuffer kafkaFetchDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() @@ -2493,7 +2565,7 @@ public void generateStreamsBuffer() throws Exception .build() .build() .build()); - FlushFW flush10 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush11 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003dL) // INI @@ -2506,19 +2578,18 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaFetchFlushEx, 0, kafkaFetchFlushEx.capacity()) 
.build(); - streams[0].write(FlushFW.TYPE_ID, flush10.buffer(), 0, flush10.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush11.buffer(), 0, flush11.sizeof()); // - PRODUCE - DirectBuffer kafkaProduceBegin1 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaProduceBeginEx1 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .produce() .transaction("transaction") - .producerId(0x770042) .topic("topic") .partition(2, 42_000, 77_000) .build() .build()); - BeginFW begin44 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin46 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003fL) // INI @@ -2528,20 +2599,19 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000066L) .traceId(0x000000000000003fL) .affinity(0x0000000000000000L) - .extension(kafkaProduceBegin1, 0, kafkaProduceBegin1.capacity()) + .extension(kafkaProduceBeginEx1, 0, kafkaProduceBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin44.buffer(), 0, begin44.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin46.buffer(), 0, begin46.sizeof()); - DirectBuffer kafkaProduceBegin2 = new UnsafeBuffer(KafkaFunctions.beginEx() + DirectBuffer kafkaProduceBeginEx2 = new UnsafeBuffer(KafkaFunctions.beginEx() .typeId(KAFKA_TYPE_ID) .produce() .transaction("transaction") - .producerId(0x210088) .topic("topic") .partition(1, 21_000) .build() .build()); - BeginFW begin45 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin47 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003eL) // REP @@ -2551,9 +2621,9 @@ public void generateStreamsBuffer() throws Exception .timestamp(0x0000000000000067L) .traceId(0x000000000000003fL) .affinity(0x0000000000000000L) - .extension(kafkaProduceBegin2, 0, kafkaProduceBegin2.capacity()) + .extension(kafkaProduceBeginEx2, 0, kafkaProduceBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin45.buffer(), 0, begin45.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin47.buffer(), 0, begin47.sizeof()); DirectBuffer kafkaProduceDataPayload = new String8FW("kafka produce payload").value(); DirectBuffer kafkaProduceDataEx1 = new UnsafeBuffer(KafkaFunctions.dataEx() @@ -2561,6 +2631,8 @@ public void generateStreamsBuffer() throws Exception .produce() .deferred(999) .timestamp(0x68) + .producerId(0x77L) + .producerEpoch((short) 0x42) .sequence(777) .ackMode("LEADER_ONLY") .key("key") @@ -2591,7 +2663,7 @@ public void generateStreamsBuffer() throws Exception .key("key") .build() .build()); - FlushFW flush11 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush12 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x000000090000000fL) // north_kafka_cache_client .routedId(0x0000000900000010L) // south_kafka_cache_server .streamId(0x000000000000003fL) // INI @@ -2604,7 +2676,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(kafkaProduceFlushEx, 0, kafkaProduceFlushEx.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush11.buffer(), 0, flush11.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush12.buffer(), 0, flush12.sizeof()); // amqp extension DirectBuffer amqpBeginEx1 = new UnsafeBuffer(AmqpFunctions.beginEx() @@ -2614,7 +2686,7 
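
Aside: the PRODUCE hunks above make the same move for producer identity — producerId is dropped from the begin extension and now rides on each data extension together with producerEpoch, so the identity can change per message rather than per stream. The reshaped builders, as they appear in these hunks:

    // Produce begin: transaction plus topic/partition only.
    DirectBuffer produceBeginEx = new UnsafeBuffer(KafkaFunctions.beginEx()
        .typeId(KAFKA_TYPE_ID)
        .produce()
            .transaction("transaction")
            .topic("topic")
            .partition(1, 21_000)
            .build()
        .build());

    // Produce data: producer id and epoch accompany each record.
    DirectBuffer produceDataEx = new UnsafeBuffer(KafkaFunctions.dataEx()
        .typeId(KAFKA_TYPE_ID)
        .produce()
            .deferred(999)
            .timestamp(0x68)
            .producerId(0x77L)
            .producerEpoch((short) 0x42)
            .sequence(777)
            .ackMode("LEADER_ONLY")
            .key("key")
            .build()
        .build());
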
@@ public void generateStreamsBuffer() throws Exception .senderSettleMode("SETTLED") .receiverSettleMode("FIRST") .build()); - BeginFW begin46 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin48 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x0000000900000025L) // north_amqp_server .routedId(0x0000000900000026L) // north_fan_server .streamId(0x0000000000000041L) // INI @@ -2626,7 +2698,7 @@ public void generateStreamsBuffer() throws Exception .affinity(0x0000000000000000L) .extension(amqpBeginEx1, 0, amqpBeginEx1.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin46.buffer(), 0, begin46.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin48.buffer(), 0, begin48.sizeof()); DirectBuffer amqpBeginEx2 = new UnsafeBuffer(AmqpFunctions.beginEx() .typeId(AMQP_TYPE_ID) @@ -2635,7 +2707,7 @@ public void generateStreamsBuffer() throws Exception .senderSettleMode("MIXED") .receiverSettleMode("SECOND") .build()); - BeginFW begin47 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + BeginFW begin49 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x0000000900000025L) // north_amqp_server .routedId(0x0000000900000026L) // north_fan_server .streamId(0x0000000000000040L) // REP @@ -2647,7 +2719,7 @@ public void generateStreamsBuffer() throws Exception .affinity(0x0000000000000000L) .extension(amqpBeginEx2, 0, amqpBeginEx2.capacity()) .build(); - streams[0].write(BeginFW.TYPE_ID, begin47.buffer(), 0, begin47.sizeof()); + streams[0].write(BeginFW.TYPE_ID, begin49.buffer(), 0, begin49.sizeof()); DirectBuffer amqpPayload = new String8FW("amqp payload").value(); DirectBuffer amqpDataEx1 = new UnsafeBuffer(AmqpFunctions.dataEx() @@ -2770,7 +2842,7 @@ public void generateStreamsBuffer() throws Exception AMQP_TYPE_ID, 0, 0, 0, // int32 typeId 3 // uint8 AmqpCapabilities }); - FlushFW flush12 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) + FlushFW flush13 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity()) .originId(0x0000000900000025L) // north_amqp_server .routedId(0x0000000900000026L) // north_fan_server .streamId(0x0000000000000041L) // INI @@ -2783,7 +2855,7 @@ public void generateStreamsBuffer() throws Exception .reserved(0x00000000) .extension(amqpFlushEx, 0, amqpFlushEx.capacity()) .build(); - streams[0].write(FlushFW.TYPE_ID, flush12.buffer(), 0, flush12.sizeof()); + streams[0].write(FlushFW.TYPE_ID, flush13.buffer(), 0, flush13.sizeof()); DirectBuffer amqpAbortEx = new UnsafeBuffer(AmqpFunctions.abortEx() .typeId(AMQP_TYPE_ID) diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 index ca669c8230..eeef5860f3 100644 Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 differ diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap index eef6dcff71..7d791d8a02 100644 Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap and 
b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap differ diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt index e2f3ab7c5a..0fb4a556a5 100644 --- a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt +++ b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt @@ -2665,10 +2665,10 @@ Zilla Frame .... ...1 = RETAIN: Set (1) QoS: EXACTLY_ONCE (2) -Frame 61: 381 bytes on wire (3048 bits), 381 bytes captured (3048 bits) +Frame 61: 383 bytes on wire (3064 bits), 383 bytes captured (3064 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::20, Dst: fe80::21 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 192, Ack: 193, Len: 307 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 192, Ack: 193, Len: 309 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA @@ -2712,6 +2712,7 @@ Zilla Frame QoS: AT_LEAST_ONCE (1) Flags: 0x00 .... ...0 = RETAIN: Not set (0) + Packet ID: 0x0000 Expiry Interval: 42 Content Type: Content Type Length: 12 @@ -2748,17 +2749,17 @@ Zilla Frame Length: 7 Value: value77 -Frame 62: 349 bytes on wire (2792 bits), 349 bytes captured (2792 bits) +Frame 62: 351 bytes on wire (2808 bits), 351 bytes captured (2808 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::21, Dst: fe80::20 -Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 193, Ack: 499, Len: 275 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 193, Ack: 501, Len: 277 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00001c80 + Offset: 0x00001c88 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -2795,6 +2796,7 @@ Zilla Frame QoS: EXACTLY_ONCE (2) Flags: 0x01 .... 
...1 = RETAIN: Set (1) + Packet ID: 0x0042 Expiry Interval: 77 Content Type: Content Type Length: 12 @@ -2827,7 +2829,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00001d40 + Offset: 0x00001d50 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -2915,7 +2917,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00001df8 + Offset: 0x00001e08 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3003,7 +3005,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00001eb0 + Offset: 0x00001ec0 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3101,7 +3103,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00001fa0 + Offset: 0x00001fb0 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3187,7 +3189,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002088 + Offset: 0x00002098 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3239,7 +3241,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002108 + Offset: 0x00002118 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3291,7 +3293,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002188 + Offset: 0x00002198 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3319,17 +3321,17 @@ Zilla Frame Length: 6 Value: Reason -Frame 70: 269 bytes on wire (2152 bits), 269 bytes captured (2152 bits) +Frame 70: 272 bytes on wire (2176 bits), 272 bytes captured (2176 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::24, Dst: fe80::25 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 195 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 198 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000021f8 + Offset: 0x00002208 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3355,7 +3357,8 @@ Zilla Frame .... ..1. = CLEAN_START: Set (1) .... .0.. = WILL: Not set (0) Expiry: 42 - QoS Maximum: 2 + Subscribe QoS Maximum: 2 + Publish QoS Maximum: 1 Packet Size Maximum: 42000 Capabilities: 0x01 .... ...1 = RETAIN: Set (1) @@ -3366,17 +3369,17 @@ Zilla Frame Length: 9 Client ID: client-id -Frame 71: 269 bytes on wire (2152 bits), 269 bytes captured (2152 bits) +Frame 71: 272 bytes on wire (2176 bits), 272 bytes captured (2176 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::25, Dst: fe80::24 -Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 195, Len: 195 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 198, Len: 198 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002268 + Offset: 0x00002280 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3402,7 +3405,8 @@ Zilla Frame .... ..1. = CLEAN_START: Set (1) .... .1.. 
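
Aside: the expected-dump changes around here are consistent with the new MQTT fields — Frames 61/62 each gain a Packet ID line, Frames 70/71 grow where the single QoS Maximum splits into Subscribe/Publish maxima, and a new session FLUSH (Frame 74) carries Packet ID 0x2142; every subsequent Offset shifts accordingly. A quick sanity check on the first delta, assuming nothing else in the frame changed:

    // Frame 61 grows 381 -> 383 bytes once the data extension carries
    // a 16-bit packet id.
    int oldBytes = 381, newBytes = 383;
    assert newBytes - oldBytes == Short.BYTES; // uint16 Packet ID: 0x0000
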
= WILL: Set (1) Expiry: 42 - QoS Maximum: 2 + Subscribe QoS Maximum: 1 + Publish QoS Maximum: 2 Packet Size Maximum: 42000 Capabilities: 0x0f .... ...1 = RETAIN: Set (1) @@ -3416,14 +3420,14 @@ Zilla Frame Frame 72: 280 bytes on wire (2240 bits), 280 bytes captured (2240 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::24, Dst: fe80::25 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 195, Ack: 196, Len: 206 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 198, Ack: 199, Len: 206 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000022d8 + Offset: 0x000022f8 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3462,14 +3466,14 @@ Zilla Frame Frame 73: 280 bytes on wire (2240 bits), 280 bytes captured (2240 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::25, Dst: fe80::24 -Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 196, Ack: 401, Len: 206 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 199, Ack: 404, Len: 206 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002358 + Offset: 0x00002378 Origin ID: 0x0000000900000022 Origin Namespace: example Origin Binding: north_mqtt_server @@ -3505,7 +3509,42 @@ Zilla Frame Deferred: 88 Data Kind: WILL (0x01) -Frame 74: 317 bytes on wire (2536 bits), 317 bytes captured (2536 bits) +Frame 74: 252 bytes on wire (2016 bits), 252 bytes captured (2016 bits) +Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) +Internet Protocol Version 6, Src: fe80::24, Dst: fe80::25 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 404, Ack: 405, Len: 178 +Zilla Frame + Frame Type ID: 0x00000005 + Frame Type: FLUSH + Protocol Type ID: 0x00000000 + Protocol Type: + Worker: 0 + Offset: 0x000023f8 + Origin ID: 0x0000000900000022 + Origin Namespace: example + Origin Binding: north_mqtt_server + Routed ID: 0x0000000900000023 + Routed Namespace: example + Routed Binding: north_mqtt_kafka_mapping + Stream ID: 0x0000000000000025 + Initial ID: 0x0000000000000025 + Reply ID: 0x0000000000000024 + Direction: INI + Sequence: 401 + Acknowledge: 402 + Maximum: 7777 + Timestamp: 0x0000000000000143 + Trace ID: 0x0000000000000025 + Authorization: 0x0000000000000000 + Budget ID: 0x0000000000000000 + Reserved: 0 + Extension: mqtt + Stream Type ID: 0x761ad4d0 + Stream Type: mqtt + Kind: SESSION (2) + Packet ID: 0x2142 + +Frame 75: 317 bytes on wire (2536 bits), 317 bytes captured (2536 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::26, Dst: fe80::27 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 243 @@ -3515,7 +3554,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000023d8 + Offset: 0x00002458 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3559,7 +3598,7 @@ Zilla Frame Partition ID: 77 Partition ID: 88 -Frame 75: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) +Frame 76: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet 
Protocol Version 6, Src: fe80::27, Dst: fe80::26 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 243, Len: 227 @@ -3569,7 +3608,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002478 + Offset: 0x000024f8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3609,7 +3648,7 @@ Zilla Frame Length: 4 Size: 0 -Frame 76: 379 bytes on wire (3032 bits), 379 bytes captured (3032 bits) +Frame 77: 379 bytes on wire (3032 bits), 379 bytes captured (3032 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::26, Dst: fe80::27 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 243, Ack: 228, Len: 305 @@ -3619,7 +3658,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002508 + Offset: 0x00002588 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3680,7 +3719,7 @@ Zilla Frame Partition ID: 201 Partition ID: 202 -Frame 77: 307 bytes on wire (2456 bits), 307 bytes captured (2456 bits) +Frame 78: 307 bytes on wire (2456 bits), 307 bytes captured (2456 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::26, Dst: fe80::27 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 548, Ack: 228, Len: 233 @@ -3690,7 +3729,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000025e0 + Offset: 0x00002660 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3724,7 +3763,7 @@ Zilla Frame Leader Epoch: 42 Correlation ID: 77 -Frame 78: 261 bytes on wire (2088 bits), 261 bytes captured (2088 bits) +Frame 79: 261 bytes on wire (2088 bits), 261 bytes captured (2088 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::26, Dst: fe80::27 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 781, Ack: 228, Len: 187 @@ -3734,7 +3773,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002670 + Offset: 0x000026f0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3759,7 +3798,7 @@ Zilla Frame Length: 11 Consumer ID: consumer-id -Frame 79: 306 bytes on wire (2448 bits), 306 bytes captured (2448 bits) +Frame 80: 306 bytes on wire (2448 bits), 306 bytes captured (2448 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::28, Dst: fe80::29 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 232 @@ -3769,7 +3808,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000026d8 + Offset: 0x00002758 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3810,7 +3849,7 @@ Zilla Frame Length: 5 Metadata: 1122334455 -Frame 80: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) +Frame 81: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::29, Dst: fe80::28 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 232, Len: 227 @@ -3820,7 +3859,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: 
Worker: 0 - Offset: 0x00002768 + Offset: 0x000027e8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3860,7 +3899,7 @@ Zilla Frame Length (varint32): 01 Length: 0 -Frame 81: 291 bytes on wire (2328 bits), 291 bytes captured (2328 bits) +Frame 82: 291 bytes on wire (2328 bits), 291 bytes captured (2328 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::28, Dst: fe80::29 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 232, Ack: 228, Len: 217 @@ -3870,7 +3909,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000027f8 + Offset: 0x00002878 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3904,7 +3943,7 @@ Zilla Frame Length: 4 Size: 0 -Frame 82: 343 bytes on wire (2744 bits), 343 bytes captured (2744 bits) +Frame 83: 343 bytes on wire (2744 bits), 343 bytes captured (2744 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::29, Dst: fe80::28 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 228, Ack: 449, Len: 269 @@ -3914,7 +3953,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002878 + Offset: 0x000028f8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -3970,7 +4009,7 @@ Zilla Frame Length (varint32): 01 Length: 0 -Frame 83: 287 bytes on wire (2296 bits), 287 bytes captured (2296 bits) +Frame 84: 287 bytes on wire (2296 bits), 287 bytes captured (2296 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::30, Dst: fe80::31 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 213 @@ -3980,7 +4019,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002930 + Offset: 0x000029b0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4013,7 +4052,7 @@ Zilla Frame Consumer ID: consumer-id Timeout: 0 -Frame 84: 287 bytes on wire (2296 bits), 287 bytes captured (2296 bits) +Frame 85: 287 bytes on wire (2296 bits), 287 bytes captured (2296 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::31, Dst: fe80::30 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 213, Len: 213 @@ -4023,7 +4062,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000029b0 + Offset: 0x00002a30 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4056,7 +4095,7 @@ Zilla Frame Consumer ID: consumer-id Timeout: 999999 -Frame 85: 658 bytes on wire (5264 bits), 658 bytes captured (5264 bits) +Frame 86: 658 bytes on wire (5264 bits), 658 bytes captured (5264 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::32, Dst: fe80::33 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 584 @@ -4066,7 +4105,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002a30 + Offset: 0x00002ab0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4309,7 +4348,7 @@ Zilla Frame Ack Mode ID: 0 Ack Mode: NONE -Frame 
86: 407 bytes on wire (3256 bits), 407 bytes captured (3256 bits) +Frame 87: 407 bytes on wire (3256 bits), 407 bytes captured (3256 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::33, Dst: fe80::32 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 584, Len: 333 @@ -4319,7 +4358,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002c20 + Offset: 0x00002ca0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4388,7 +4427,7 @@ Zilla Frame Ack Mode ID: 1 Ack Mode: LEADER_ONLY -Frame 87: 339 bytes on wire (2712 bits), 339 bytes captured (2712 bits) +Frame 88: 339 bytes on wire (2712 bits), 339 bytes captured (2712 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::33, Dst: fe80::32 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 334, Ack: 584, Len: 265 @@ -4398,7 +4437,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002d18 + Offset: 0x00002d98 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4451,7 +4490,7 @@ Zilla Frame Ack Mode ID: -1 Ack Mode: IN_SYNC_REPLICAS -Frame 88: 459 bytes on wire (3672 bits), 459 bytes captured (3672 bits) +Frame 89: 459 bytes on wire (3672 bits), 459 bytes captured (3672 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::32, Dst: fe80::33 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 584, Ack: 599, Len: 385 @@ -4461,7 +4500,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002dc8 + Offset: 0x00002e48 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4555,17 +4594,17 @@ Zilla Frame Length: 6 Value: value2 -Frame 89: 385 bytes on wire (3080 bits), 385 bytes captured (3080 bits) +Frame 90: 395 bytes on wire (3160 bits), 395 bytes captured (3160 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::32, Dst: fe80::33 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 969, Ack: 599, Len: 311 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 969, Ack: 599, Len: 321 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002ef0 + Offset: 0x00002f70 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4601,6 +4640,8 @@ Zilla Frame Merged API: PRODUCE (0) Deferred: 100 Timestamp: 0x0000000000000053 + Producer ID: 0x0000000000000077 + Producer Epoch: 0x0042 Partition: 1 [77000] Partition ID: 1 Partition Offset: 77000 @@ -4641,17 +4682,17 @@ Zilla Frame Length: 6 Value: value2 -Frame 90: 304 bytes on wire (2432 bits), 304 bytes captured (2432 bits) +Frame 91: 304 bytes on wire (2432 bits), 304 bytes captured (2432 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::32, Dst: fe80::33 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 1280, Ack: 599, Len: 230 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 1290, Ack: 599, Len: 230 Zilla Frame Frame Type ID: 0x00000005 Frame Type: FLUSH Protocol Type ID: 
0x00000000 Protocol Type: Worker: 0 - Offset: 0x00002fd0 + Offset: 0x00003058 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4685,17 +4726,17 @@ Zilla Frame Metadata: metadata Correlation ID: 77 -Frame 91: 420 bytes on wire (3360 bits), 420 bytes captured (3360 bits) +Frame 92: 420 bytes on wire (3360 bits), 420 bytes captured (3360 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::32, Dst: fe80::33 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 1510, Ack: 599, Len: 346 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 1520, Ack: 599, Len: 346 Zilla Frame Frame Type ID: 0x00000005 Frame Type: FLUSH Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003060 + Offset: 0x000030e8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4774,7 +4815,77 @@ Zilla Frame Length: 3 Key: key -Frame 92: 260 bytes on wire (2080 bits), 260 bytes captured (2080 bits) +Frame 93: 263 bytes on wire (2104 bits), 263 bytes captured (2104 bits) +Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) +Internet Protocol Version 6, Src: fe80::132, Dst: fe80::133 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 189 +Zilla Frame + Frame Type ID: 0x00000001 + Frame Type: BEGIN + Protocol Type ID: 0x00000000 + Protocol Type: + Worker: 0 + Offset: 0x000031e8 + Origin ID: 0x000000090000000f + Origin Namespace: example + Origin Binding: north_kafka_cache_client + Routed ID: 0x0000000900000010 + Routed Namespace: example + Routed Binding: south_kafka_cache_server + Stream ID: 0x0000000000000133 + Initial ID: 0x0000000000000133 + Reply ID: 0x0000000000000132 + Direction: INI + Sequence: 0 + Acknowledge: 0 + Maximum: 0 + Timestamp: 0x0000000000000056 + Trace ID: 0x0000000000000035 + Authorization: 0x0000000000000000 + Affinity: 0x0000000000000000 + Extension: kafka + Stream Type ID: 0xe1204b08 + Stream Type: kafka + API: INIT_PRODUCER_ID (22) + Producer ID: 0x0000000000000077 + Producer Epoch: 0x0042 + +Frame 94: 263 bytes on wire (2104 bits), 263 bytes captured (2104 bits) +Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) +Internet Protocol Version 6, Src: fe80::133, Dst: fe80::132 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 189, Len: 189 +Zilla Frame + Frame Type ID: 0x00000001 + Frame Type: BEGIN + Protocol Type ID: 0x00000000 + Protocol Type: + Worker: 0 + Offset: 0x00003250 + Origin ID: 0x000000090000000f + Origin Namespace: example + Origin Binding: north_kafka_cache_client + Routed ID: 0x0000000900000010 + Routed Namespace: example + Routed Binding: south_kafka_cache_server + Stream ID: 0x0000000000000132 + Initial ID: 0x0000000000000133 + Reply ID: 0x0000000000000132 + Direction: REP + Sequence: 0 + Acknowledge: 0 + Maximum: 0 + Timestamp: 0x0000000000000057 + Trace ID: 0x0000000000000035 + Authorization: 0x0000000000000000 + Affinity: 0x0000000000000000 + Extension: kafka + Stream Type ID: 0xe1204b08 + Stream Type: kafka + API: INIT_PRODUCER_ID (22) + Producer ID: 0x0000000000000088 + Producer Epoch: 0x0021 + +Frame 95: 260 bytes on wire (2080 bits), 260 bytes captured (2080 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::34, Dst: fe80::35 Transmission Control Protocol, Src Port: 0, Dst 
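
Aside: the two new BEGIN frames above (93 and 94, API INIT_PRODUCER_ID) are the dump-side counterparts of begin34/begin35 written by the generator, carrying producer id 0x77/epoch 0x42 on the initial stream and 0x88/0x21 on the reply. The stream-id pairing observed throughout this capture — stated as an observation, not a documented invariant:

    // The initiating (INI) stream id is odd; its reply (REP) stream id
    // clears the low bit.
    long initialId = 0x0000000000000133L;  // Frame 93, Direction: INI
    long replyId = initialId & ~1L;        // 0x...132, Frame 94, Direction: REP
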
Port: 7114, Seq: 0, Ack: 1, Len: 186 @@ -4784,7 +4895,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003160 + Offset: 0x000032b8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4810,7 +4921,7 @@ Zilla Frame Length: 5 Topic: topic -Frame 93: 260 bytes on wire (2080 bits), 260 bytes captured (2080 bits) +Frame 96: 260 bytes on wire (2080 bits), 260 bytes captured (2080 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::35, Dst: fe80::34 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 186, Len: 186 @@ -4820,7 +4931,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000031c0 + Offset: 0x00003318 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4846,7 +4957,7 @@ Zilla Frame Length: 5 Topic: topic -Frame 94: 317 bytes on wire (2536 bits), 317 bytes captured (2536 bits) +Frame 97: 317 bytes on wire (2536 bits), 317 bytes captured (2536 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::34, Dst: fe80::35 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 186, Ack: 187, Len: 243 @@ -4856,7 +4967,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003220 + Offset: 0x00003378 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4902,17 +5013,17 @@ Zilla Frame Partition ID: 100 Leader ID: 4200 -Frame 95: 285 bytes on wire (2280 bits), 285 bytes captured (2280 bits) +Frame 98: 288 bytes on wire (2304 bits), 288 bytes captured (2304 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::36, Dst: fe80::37 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 211 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 214 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000032c0 + Offset: 0x00003418 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4934,9 +5045,6 @@ Zilla Frame Stream Type ID: 0xe1204b08 Stream Type: kafka API: OFFSET_COMMIT (8) - Topic: topic - Length: 5 - Topic: topic Group ID: group Length: 5 Group ID: group @@ -4946,18 +5054,22 @@ Zilla Frame Instance ID: instance Length: 8 Instance ID: instance + Host: host + Length: 4 + Host: host + Port: 4242 -Frame 96: 285 bytes on wire (2280 bits), 285 bytes captured (2280 bits) +Frame 99: 288 bytes on wire (2304 bits), 288 bytes captured (2304 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::37, Dst: fe80::36 -Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 211, Len: 211 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 214, Len: 214 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003340 + Offset: 0x00003498 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -4979,9 +5091,6 @@ Zilla Frame Stream Type ID: 0xe1204b08 Stream Type: kafka API: OFFSET_COMMIT (8) - Topic: topic - Length: 5 - Topic: topic Group ID: 
group Length: 5 Group ID: group @@ -4991,18 +5100,22 @@ Zilla Frame Instance ID: instance Length: 8 Instance ID: instance + Host: host + Length: 4 + Host: host + Port: 4242 -Frame 97: 340 bytes on wire (2720 bits), 340 bytes captured (2720 bits) +Frame 100: 346 bytes on wire (2768 bits), 346 bytes captured (2768 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::36, Dst: fe80::37 -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 211, Ack: 212, Len: 266 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 214, Ack: 215, Len: 272 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000033c0 + Offset: 0x00003518 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5035,6 +5148,9 @@ Zilla Frame Stream Type ID: 0xe1204b08 Stream Type: kafka API: OFFSET_COMMIT (8) + Topic: test + Length: 4 + Topic: test Progress: 21 [1234] Partition ID: 21 Partition Offset: 1234 @@ -5046,7 +5162,7 @@ Zilla Frame Generation ID: 42 Leader Epoch: 77 -Frame 98: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) +Frame 101: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::38, Dst: fe80::39 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 227 @@ -5056,7 +5172,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003470 + Offset: 0x000035d0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5096,7 +5212,7 @@ Zilla Frame Partition ID: 77 Partition ID: 88 -Frame 99: 289 bytes on wire (2312 bits), 289 bytes captured (2312 bits) +Frame 102: 289 bytes on wire (2312 bits), 289 bytes captured (2312 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::39, Dst: fe80::38 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 227, Len: 215 @@ -5106,7 +5222,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003500 + Offset: 0x00003660 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5143,7 +5259,7 @@ Zilla Frame Size: 1 Partition ID: 42 -Frame 100: 382 bytes on wire (3056 bits), 382 bytes captured (3056 bits) +Frame 103: 382 bytes on wire (3056 bits), 382 bytes captured (3056 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::38, Dst: fe80::39 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 227, Ack: 216, Len: 308 @@ -5153,7 +5269,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003580 + Offset: 0x000036e0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5211,7 +5327,7 @@ Zilla Frame Length: 9 Metadata: metadata3 -Frame 101: 295 bytes on wire (2360 bits), 295 bytes captured (2360 bits) +Frame 104: 295 bytes on wire (2360 bits), 295 bytes captured (2360 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3a, Dst: fe80::3b Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 221 @@ -5221,7 +5337,7 @@ 
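
Aside: the OFFSET_COMMIT dump frames mirror the generator change — the BEGIN frames (98/99) lose their Topic lines and gain Host/Port, while the DATA frame (100) gains the Topic. The DATA delta lines up with the moved field, assuming a two-byte length prefix ahead of the string:

    // Frame 100 grows 340 -> 346 bytes: the topic "test" plus its prefix.
    int delta = 346 - 340;
    assert delta == "test".length() + Short.BYTES; // 4 + 2 == 6
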
Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003660 + Offset: 0x000037c0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5259,7 +5375,7 @@ Zilla Frame Length: 7 Config: config3 -Frame 102: 268 bytes on wire (2144 bits), 268 bytes captured (2144 bits) +Frame 105: 268 bytes on wire (2144 bits), 268 bytes captured (2144 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3b, Dst: fe80::3a Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 221, Len: 194 @@ -5269,7 +5385,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000036e8 + Offset: 0x00003848 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5298,7 +5414,7 @@ Zilla Frame Length: 4 Size: 0 -Frame 103: 337 bytes on wire (2696 bits), 337 bytes captured (2696 bits) +Frame 106: 337 bytes on wire (2696 bits), 337 bytes captured (2696 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3a, Dst: fe80::3b Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 221, Ack: 195, Len: 263 @@ -5308,7 +5424,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003750 + Offset: 0x000038b0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5366,7 +5482,7 @@ Zilla Frame Length: 6 Value: value3 -Frame 104: 363 bytes on wire (2904 bits), 363 bytes captured (2904 bits) +Frame 107: 363 bytes on wire (2904 bits), 363 bytes captured (2904 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3c, Dst: fe80::3d Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 289 @@ -5376,7 +5492,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003800 + Offset: 0x00003960 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5462,7 +5578,7 @@ Zilla Frame Isolation: READ_UNCOMMITTED (0) Delta Type: NONE (0) -Frame 105: 315 bytes on wire (2520 bits), 315 bytes captured (2520 bits) +Frame 108: 315 bytes on wire (2520 bits), 315 bytes captured (2520 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3d, Dst: fe80::3c Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 289, Len: 241 @@ -5472,7 +5588,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000038c8 + Offset: 0x00003a28 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5522,7 +5638,7 @@ Zilla Frame Isolation: READ_COMMITTED (1) Delta Type: JSON_PATCH (1) -Frame 106: 390 bytes on wire (3120 bits), 390 bytes captured (3120 bits) +Frame 109: 390 bytes on wire (3120 bits), 390 bytes captured (3120 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3c, Dst: fe80::3d Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 289, Ack: 242, Len: 316 @@ -5532,7 +5648,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003960 + Offset: 0x00003ac0 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: 
north_kafka_cache_client @@ -5608,7 +5724,7 @@ Zilla Frame Length: 6 Value: value2 -Frame 107: 336 bytes on wire (2688 bits), 336 bytes captured (2688 bits) +Frame 110: 336 bytes on wire (2688 bits), 336 bytes captured (2688 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3c, Dst: fe80::3d Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 605, Ack: 242, Len: 262 @@ -5618,7 +5734,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003a48 + Offset: 0x00003ba8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5673,17 +5789,17 @@ Zilla Frame Key: key1 Evaluation: LAZY (0) -Frame 108: 310 bytes on wire (2480 bits), 310 bytes captured (2480 bits) +Frame 111: 302 bytes on wire (2416 bits), 302 bytes captured (2416 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3e, Dst: fe80::3f -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 236 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 228 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003af8 + Offset: 0x00003c58 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5708,7 +5824,6 @@ Zilla Frame Transaction: transaction Length: 11 Transaction: transaction - Producer ID: 0x0000000000770042 Topic: topic Length: 5 Topic: topic @@ -5721,17 +5836,17 @@ Zilla Frame Length: -1 Metadata: -Frame 109: 310 bytes on wire (2480 bits), 310 bytes captured (2480 bits) +Frame 112: 302 bytes on wire (2416 bits), 302 bytes captured (2416 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3f, Dst: fe80::3e -Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 236, Len: 236 +Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 228, Len: 228 Zilla Frame Frame Type ID: 0x00000001 Frame Type: BEGIN Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003b90 + Offset: 0x00003ce8 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5756,7 +5871,6 @@ Zilla Frame Transaction: transaction Length: 11 Transaction: transaction - Producer ID: 0x0000000000210088 Topic: topic Length: 5 Topic: topic @@ -5769,17 +5883,17 @@ Zilla Frame Length: -1 Metadata: -Frame 110: 343 bytes on wire (2744 bits), 343 bytes captured (2744 bits) +Frame 113: 353 bytes on wire (2824 bits), 353 bytes captured (2824 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3e, Dst: fe80::3f -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 236, Ack: 237, Len: 269 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 228, Ack: 229, Len: 279 Zilla Frame Frame Type ID: 0x00000002 Frame Type: DATA Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003c28 + Offset: 0x00003d78 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5814,6 +5928,8 @@ Zilla Frame API: PRODUCE (0) Deferred: 999 Timestamp: 0x0000000000000068 + Producer ID: 0x0000000000000077 + Producer Epoch: 0x0042 Sequence: 777 CRC32C: 0x00000000 Ack Mode ID: 1 @@ -5845,17 
+5961,17 @@ Zilla Frame Length: 6 Value: value2 -Frame 111: 295 bytes on wire (2360 bits), 295 bytes captured (2360 bits) +Frame 114: 295 bytes on wire (2360 bits), 295 bytes captured (2360 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::3e, Dst: fe80::3f -Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 505, Ack: 237, Len: 221 +Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 507, Ack: 229, Len: 221 Zilla Frame Frame Type ID: 0x00000005 Frame Type: FLUSH Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003ce0 + Offset: 0x00003e38 Origin ID: 0x000000090000000f Origin Namespace: example Origin Binding: north_kafka_cache_client @@ -5893,7 +6009,7 @@ Zilla Frame Key: key Error: 0 -Frame 112: 248 bytes on wire (1984 bits), 248 bytes captured (1984 bits) +Frame 115: 248 bytes on wire (1984 bits), 248 bytes captured (1984 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::40, Dst: fe80::41 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 174 @@ -5903,7 +6019,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003d68 + Offset: 0x00003ec0 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -5931,7 +6047,7 @@ Zilla Frame Sender Settle Mode: SETTLED (1) Receiver Settle Mode: FIRST (0) -Frame 113: 248 bytes on wire (1984 bits), 248 bytes captured (1984 bits) +Frame 116: 248 bytes on wire (1984 bits), 248 bytes captured (1984 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::41, Dst: fe80::40 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 174, Len: 174 @@ -5941,7 +6057,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003dd0 + Offset: 0x00003f28 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -5969,7 +6085,7 @@ Zilla Frame Sender Settle Mode: MIXED (2) Receiver Settle Mode: SECOND (1) -Frame 114: 433 bytes on wire (3464 bits), 433 bytes captured (3464 bits) +Frame 117: 433 bytes on wire (3464 bits), 433 bytes captured (3464 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::40, Dst: fe80::41 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 174, Ack: 175, Len: 359 @@ -5979,7 +6095,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003e38 + Offset: 0x00003f90 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -6074,7 +6190,7 @@ Zilla Frame Body Kind: VALUE (9) Deferred: 9999 -Frame 115: 526 bytes on wire (4208 bits), 526 bytes captured (4208 bits) +Frame 118: 526 bytes on wire (4208 bits), 526 bytes captured (4208 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::41, Dst: fe80::40 Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 175, Ack: 533, Len: 452 @@ -6084,7 +6200,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00003f58 + Offset: 0x000040b0 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -6199,7 +6315,7 @@ Zilla Frame Body Kind: VALUE_STRING32 (2) Deferred: 3333 -Frame 
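
Aside: the PRODUCE frames above show the producer identity moving from BEGIN to DATA on the wire as well, and the capture sizes shift by exactly the field widths:

    // BEGIN shrinks 310 -> 302; DATA grows 343 -> 353.
    assert 310 - 302 == Long.BYTES;               // int64 Producer ID left BEGIN
    assert 353 - 343 == Long.BYTES + Short.BYTES; // id + int16 epoch joined DATA
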
116: 498 bytes on wire (3984 bits), 498 bytes captured (3984 bits) +Frame 119: 498 bytes on wire (3984 bits), 498 bytes captured (3984 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::40, Dst: fe80::41 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 533, Ack: 627, Len: 424 @@ -6209,7 +6325,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x000040d8 + Offset: 0x00004230 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -6316,7 +6432,7 @@ Zilla Frame Body Kind: VALUE_STRING32 (2) Deferred: 4444 -Frame 117: 242 bytes on wire (1936 bits), 242 bytes captured (1936 bits) +Frame 120: 242 bytes on wire (1936 bits), 242 bytes captured (1936 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::40, Dst: fe80::41 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 957, Ack: 627, Len: 168 @@ -6326,7 +6442,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00004238 + Offset: 0x00004390 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server @@ -6350,7 +6466,7 @@ Zilla Frame Stream Type: amqp Capabilities: SEND_AND_RECEIVE (3) -Frame 118: 239 bytes on wire (1912 bits), 239 bytes captured (1912 bits) +Frame 121: 239 bytes on wire (1912 bits), 239 bytes captured (1912 bits) Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00) Internet Protocol Version 6, Src: fe80::40, Dst: fe80::41 Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 1125, Ack: 627, Len: 165 @@ -6360,7 +6476,7 @@ Zilla Frame Protocol Type ID: 0x00000000 Protocol Type: Worker: 0 - Offset: 0x00004298 + Offset: 0x000043f0 Origin ID: 0x0000000900000025 Origin Namespace: example Origin Binding: north_amqp_server diff --git a/incubator/command-generate/pom.xml b/incubator/command-generate/pom.xml index 18e2c21d1d..d956af5203 100644 --- a/incubator/command-generate/pom.xml +++ b/incubator/command-generate/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml @@ -87,19 +87,25 @@ io.aklivity.zilla - validator-avro + model-avro ${project.version} provided io.aklivity.zilla - validator-core + model-core ${project.version} provided io.aklivity.zilla - validator-json + model-json + ${project.version} + provided + + + io.aklivity.zilla + model-protobuf ${project.version} provided diff --git a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/airline/ConfigGenerator.java b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/airline/ConfigGenerator.java index ea664debf1..caf0718114 100644 --- a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/airline/ConfigGenerator.java +++ b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/airline/ConfigGenerator.java @@ -28,9 +28,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLMapper; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfig; -import io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import 
io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; public abstract class ConfigGenerator { @@ -40,9 +40,9 @@ public abstract class ConfigGenerator protected static final String VERSION_LATEST = "latest"; protected static final Pattern JSON_CONTENT_TYPE = Pattern.compile("^application/(?:.+\\+)?json$"); - protected final Map validators = Map.of( - "string", StringValidatorConfig.builder().build(), - "integer", IntegerValidatorConfig.builder().build() + protected final Map models = Map.of( + "string", StringModelConfig.builder().build(), + "integer", IntegerModelConfig.builder().build() ); protected final Matcher jsonContentType = JSON_CONTENT_TYPE.matcher(""); diff --git a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java index 381452608f..cba4f9c148 100644 --- a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java +++ b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java @@ -55,11 +55,11 @@ import io.aklivity.zilla.runtime.engine.config.EngineConfig; import io.aklivity.zilla.runtime.engine.config.EngineConfigWriter; import io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; import io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; public class AsyncApiHttpProxyConfigGenerator extends AsyncApiConfigGenerator @@ -357,7 +357,7 @@ private HttpRequestConfigBuilder injectContent( if (hasJsonContentType()) { request. 
- content(JsonValidatorConfig::builder) + content(JsonModelConfig::builder) .catalog() .name(INLINE_CATALOG_NAME) .inject(catalog -> injectSchemas(catalog, messages)) @@ -396,13 +396,13 @@ private HttpRequestConfigBuilder injectPathParams( Parameter parameter = parameters.get(name); if (parameter.schema != null && parameter.schema.type != null) { - ValidatorConfig validator = validators.get(parameter.schema.type); - if (validator != null) + ModelConfig model = models.get(parameter.schema.type); + if (model != null) { request .pathParam() .name(name) - .validator(validator) + .model(model) .build(); } } diff --git a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java index 2d80accf23..edc1d1d971 100644 --- a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java +++ b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java @@ -46,7 +46,7 @@ import io.aklivity.zilla.runtime.engine.config.EngineConfig; import io.aklivity.zilla.runtime.engine.config.EngineConfigWriter; import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; public class AsyncApiMqttProxyConfigGenerator extends AsyncApiConfigGenerator @@ -250,7 +250,7 @@ private BindingConfigBuilder injectMqttServerOptions( .options(MqttOptionsConfig::builder) .topic() .name(topic) - .content(JsonValidatorConfig::builder) + .content(JsonModelConfig::builder) .catalog() .name(INLINE_CATALOG_NAME) .inject(cataloged -> injectJsonSchemas(cataloged, messages, APPLICATION_JSON)) diff --git a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java index 04f5e65cfa..50b86387a9 100644 --- a/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java +++ b/incubator/command-generate/src/main/java/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java @@ -58,11 +58,11 @@ import io.aklivity.zilla.runtime.engine.config.EngineConfig; import io.aklivity.zilla.runtime.engine.config.EngineConfigWriter; import io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; import io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; import 
io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; public class OpenApiHttpProxyConfigGenerator extends OpenApiConfigGenerator @@ -335,7 +335,7 @@ private HttpRequestConfigBuilder injectContent( if (schema != null) { request. - content(JsonValidatorConfig::builder) + content(JsonModelConfig::builder) .catalog() .name(INLINE_CATALOG_NAME) .schema() @@ -358,8 +358,8 @@ private HttpRequestConfigBuilder injectParams( { if (parameter.schema != null && parameter.schema.type != null) { - ValidatorConfig validator = validators.get(parameter.schema.type); - if (validator != null) + ModelConfig model = models.get(parameter.schema.type); + if (model != null) { switch (parameter.in) { @@ -367,21 +367,21 @@ private HttpRequestConfigBuilder injectParams( request. pathParam() .name(parameter.name) - .validator(validator) + .model(model) .build(); break; case "query": request. queryParam() .name(parameter.name) - .validator(validator) + .model(model) .build(); break; case "header": request. header() .name(parameter.name) - .validator(validator) + .model(model) .build(); break; } @@ -451,7 +451,7 @@ private HttpRequestConfigBuilder injectResponses( .status(Integer.parseInt(status)) .contentType(response2.getKey()) .inject(response -> injectResponseHeaders(responses1, response)) - .content(JsonValidatorConfig::builder) + .content(JsonModelConfig::builder) .catalog() .name(INLINE_CATALOG_NAME) .schema() @@ -476,13 +476,13 @@ private HttpResponseConfigBuilder injectResponseHeaders( for (Map.Entry header : responses.headers.entrySet()) { String name = header.getKey(); - ValidatorConfig validator = validators.get(header.getValue().schema.type); - if (validator != null) + ModelConfig model = models.get(header.getValue().schema.type); + if (model != null) { response .header() .name(name) - .validator(validator) + .model(model) .build(); } } diff --git a/incubator/command-generate/src/main/moditect/module-info.java b/incubator/command-generate/src/main/moditect/module-info.java index fe511b5c48..0c03ce63de 100644 --- a/incubator/command-generate/src/main/moditect/module-info.java +++ b/incubator/command-generate/src/main/moditect/module-info.java @@ -23,9 +23,10 @@ requires io.aklivity.zilla.runtime.catalog.inline; requires io.aklivity.zilla.runtime.guard.jwt; requires io.aklivity.zilla.runtime.vault.filesystem; - requires io.aklivity.zilla.runtime.validator.avro; - requires io.aklivity.zilla.runtime.validator.core; - requires io.aklivity.zilla.runtime.validator.json; + requires io.aklivity.zilla.runtime.model.avro; + requires io.aklivity.zilla.runtime.model.core; + requires io.aklivity.zilla.runtime.model.json; + requires io.aklivity.zilla.runtime.model.protobuf; requires com.fasterxml.jackson.dataformat.yaml; requires com.fasterxml.jackson.databind; diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/complete/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/complete/zilla.yaml index 26cea2474d..14c6d158ef 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/complete/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/complete/zilla.yaml @@ -45,14 +45,14 @@ bindings: path: id: string content: - type: json + model: json catalog: catalog0: - subject: item - path: /items 
method: POST content: - type: json + model: json catalog: catalog0: - subject: item diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/validator/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/validator/zilla.yaml index 449cdb7269..d97ca1fed1 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/validator/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/http/proxy/validator/zilla.yaml @@ -23,14 +23,14 @@ bindings: path: id: string content: - type: json + model: json catalog: catalog0: - subject: item - path: /items method: POST content: - type: json + model: json catalog: catalog0: - subject: item diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/complete/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/complete/zilla.yaml index 23be2cb3d7..630cd6ddf8 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/complete/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/complete/zilla.yaml @@ -34,7 +34,7 @@ bindings: topics: - name: smartylighting/streetlights/1/0/event/*/lighting/measured content: - type: json + model: json catalog: catalog0: - subject: items diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/validator/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/validator/zilla.yaml index 4204654d97..8f8348a2f0 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/validator/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/asyncapi/mqtt/proxy/validator/zilla.yaml @@ -17,7 +17,7 @@ bindings: topics: - name: smartylighting/streetlights/1/0/event/*/lighting/measured content: - type: json + model: json catalog: catalog0: - subject: items diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/complete/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/complete/zilla.yaml index d4a05cff4a..3d4edbea3e 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/complete/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/complete/zilla.yaml @@ -47,7 +47,7 @@ bindings: - path: /items method: POST content: - type: json + model: json catalog: catalog0: - subject: Item @@ -113,7 +113,7 @@ bindings: x-pages: integer x-next: string content: - type: json + model: json catalog: catalog0: - subject: Items @@ -124,7 +124,7 @@ bindings: content-type: - application/json content: - type: json + model: json catalog: catalog0: - subject: Item diff --git 
a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/jwt/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/jwt/zilla.yaml index cd157b190e..65dc139e8c 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/jwt/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/jwt/zilla.yaml @@ -25,7 +25,7 @@ bindings: - path: /items method: POST content: - type: json + model: json catalog: catalog0: - subject: Item diff --git a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/validator/zilla.yaml b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/validator/zilla.yaml index 2e33053261..c3e2168170 100644 --- a/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/validator/zilla.yaml +++ b/incubator/command-generate/src/test/resources/io/aklivity/zilla/runtime/command/generate/internal/openapi/http/proxy/validator/zilla.yaml @@ -25,7 +25,7 @@ bindings: - path: /items method: POST content: - type: json + model: json catalog: catalog0: - subject: Item @@ -90,7 +90,7 @@ bindings: x-pages: integer x-next: string content: - type: json + model: json catalog: catalog0: - subject: Items @@ -101,7 +101,7 @@ bindings: content-type: - application/json content: - type: json + model: json catalog: catalog0: - subject: Item diff --git a/incubator/command-log/pom.xml b/incubator/command-log/pom.xml index 5b2a23ff10..08af549262 100644 --- a/incubator/command-log/pom.xml +++ b/incubator/command-log/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index 0aeb4ef3eb..72f04d6576 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -110,8 +110,10 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttSessionBeginExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttSessionFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttSubscribeBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttSubscribeDataExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttSubscribeFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.SignalFW; @@ -1513,7 +1515,33 @@ private void onMqttFlushEx( final OctetsFW extension = flush.extension(); final MqttFlushExFW mqttFlushEx = mqttFlushExRO.wrap(extension.buffer(), extension.offset(), extension.limit()); - final 
Array32FW filters = mqttFlushEx.subscribe().filters(); + + + switch (mqttFlushEx.kind()) + { + case MqttFlushExFW.KIND_SESSION: + onMqttSessionFlushEx(offset, timestamp, mqttFlushEx.session()); + break; + case MqttFlushExFW.KIND_SUBSCRIBE: + onMqttSubscribeFlushEx(offset, timestamp, mqttFlushEx.subscribe()); + break; + } + } + + private void onMqttSessionFlushEx( + int offset, + long timestamp, + MqttSessionFlushExFW session) + { + out.printf(verboseFormat, index, offset, timestamp, format("%d", session.packetId())); + } + + private void onMqttSubscribeFlushEx( + int offset, + long timestamp, + MqttSubscribeFlushExFW subscribe) + { + final Array32FW filters = subscribe.filters(); filters.forEach(f -> out.printf(verboseFormat, index, offset, timestamp, format("%s %d %d", f.pattern(), f.subscriptionId(), f.flags()))); diff --git a/incubator/command-tune/pom.xml b/incubator/command-tune/pom.xml index 0a4e91116b..504b5fd873 100644 --- a/incubator/command-tune/pom.xml +++ b/incubator/command-tune/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/exporter-otlp.spec/pom.xml b/incubator/exporter-otlp.spec/pom.xml index e1f4a4d536..a4a60d9231 100644 --- a/incubator/exporter-otlp.spec/pom.xml +++ b/incubator/exporter-otlp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/exporter-otlp/pom.xml b/incubator/exporter-otlp/pom.xml index b83e2c9fcf..c03bc00a80 100644 --- a/incubator/exporter-otlp/pom.xml +++ b/incubator/exporter-otlp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml diff --git a/incubator/validator-avro.spec/COPYRIGHT b/incubator/model-avro.spec/COPYRIGHT similarity index 100% rename from incubator/validator-avro.spec/COPYRIGHT rename to incubator/model-avro.spec/COPYRIGHT diff --git a/incubator/validator-avro.spec/LICENSE b/incubator/model-avro.spec/LICENSE similarity index 100% rename from incubator/validator-avro.spec/LICENSE rename to incubator/model-avro.spec/LICENSE diff --git a/incubator/validator-avro.spec/NOTICE b/incubator/model-avro.spec/NOTICE similarity index 100% rename from incubator/validator-avro.spec/NOTICE rename to incubator/model-avro.spec/NOTICE diff --git a/incubator/validator-avro.spec/NOTICE.template b/incubator/model-avro.spec/NOTICE.template similarity index 100% rename from incubator/validator-avro.spec/NOTICE.template rename to incubator/model-avro.spec/NOTICE.template diff --git a/incubator/validator-avro.spec/mvnw b/incubator/model-avro.spec/mvnw similarity index 100% rename from incubator/validator-avro.spec/mvnw rename to incubator/model-avro.spec/mvnw diff --git a/incubator/validator-avro.spec/mvnw.cmd b/incubator/model-avro.spec/mvnw.cmd similarity index 100% rename from incubator/validator-avro.spec/mvnw.cmd rename to incubator/model-avro.spec/mvnw.cmd diff --git a/incubator/validator-avro.spec/pom.xml b/incubator/model-avro.spec/pom.xml similarity index 93% rename from incubator/validator-avro.spec/pom.xml rename to incubator/model-avro.spec/pom.xml index 505f136cec..49633ad0a8 100644 --- a/incubator/validator-avro.spec/pom.xml +++ b/incubator/model-avro.spec/pom.xml @@ -8,12 +8,12 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml - validator-avro.spec - zilla::incubator::validator-avro.spec + model-avro.spec + zilla::incubator::model-avro.spec @@ -79,7 +79,7 @@ ${project.version} core - io.aklivity.zilla.specs.validator.avro.internal.types + io.aklivity.zilla.specs.model.avro.internal.types @@ -135,7 
+135,7 @@ jacoco-maven-plugin - io/aklivity/zilla/specs/validator/avro/internal/types/**/*.class + io/aklivity/zilla/specs/model/avro/internal/types/**/*.class diff --git a/incubator/validator-avro.spec/src/main/moditect/module-info.java b/incubator/model-avro.spec/src/main/moditect/module-info.java similarity index 92% rename from incubator/validator-avro.spec/src/main/moditect/module-info.java rename to incubator/model-avro.spec/src/main/moditect/module-info.java index b289801500..99ce1d21bf 100644 --- a/incubator/validator-avro.spec/src/main/moditect/module-info.java +++ b/incubator/model-avro.spec/src/main/moditect/module-info.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -open module io.aklivity.zilla.specs.validator.avro +open module io.aklivity.zilla.specs.model.avro { requires transitive io.aklivity.zilla.specs.engine; } diff --git a/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/config/model.yaml b/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/config/model.yaml new file mode 100644 index 0000000000..dbb4b6ecdd --- /dev/null +++ b/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/config/model.yaml @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +--- +name: test +catalogs: + test0: + type: test + options: + schema: | + { + "fields": [ + { + "name": "id", + "type": "string" + }, + { + "name": "status", + "type": "string" + } + ], + "name": "Event", + "namespace": "io.aklivity.example", + "type": "record" + } +bindings: + test: + kind: server + type: test + options: + value: + model: avro + view: json + catalog: + catalog0: + - subject: test0 + version: latest + exit: test diff --git a/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/schema/avro.schema.patch.json b/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/schema/avro.schema.patch.json new file mode 100644 index 0000000000..7d1a7c526b --- /dev/null +++ b/incubator/model-avro.spec/src/main/scripts/io/aklivity/zilla/specs/model/avro/schema/avro.schema.patch.json @@ -0,0 +1,136 @@ +[ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "avro" + }, + { + "op": "add", + "path": "/$defs/converter/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "avro" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "avro" + }, + "view": + { + "type": "string", + "enum": + [ + "json" + ] + }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + } + } + } +] diff --git a/incubator/model-avro.spec/src/test/java/io/aklivity/zilla/specs/model/avro/config/SchemaTest.java b/incubator/model-avro.spec/src/test/java/io/aklivity/zilla/specs/model/avro/config/SchemaTest.java new file mode 100644 index 0000000000..e907872462 --- /dev/null +++ b/incubator/model-avro.spec/src/test/java/io/aklivity/zilla/specs/model/avro/config/SchemaTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.specs.model.avro.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.JsonObject; + +import org.junit.Rule; +import org.junit.Test; + +import io.aklivity.zilla.specs.engine.config.ConfigSchemaRule; + +public class SchemaTest +{ + @Rule + public final ConfigSchemaRule schema = new ConfigSchemaRule() + .schemaPatch("io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/model/avro/schema/avro.schema.patch.json") + .configurationRoot("io/aklivity/zilla/specs/model/avro/config"); + + @Test + public void shouldValidateCatalog() + { + JsonObject config = schema.validate("model.yaml"); + + assertThat(config, not(nullValue())); + } +} diff --git a/incubator/validator-avro/COPYRIGHT b/incubator/model-avro/COPYRIGHT similarity index 100% rename from incubator/validator-avro/COPYRIGHT rename to incubator/model-avro/COPYRIGHT diff --git a/incubator/validator-avro/LICENSE b/incubator/model-avro/LICENSE similarity index 100% rename from incubator/validator-avro/LICENSE rename to incubator/model-avro/LICENSE diff --git a/incubator/validator-avro/NOTICE b/incubator/model-avro/NOTICE similarity index 100% rename from incubator/validator-avro/NOTICE rename to incubator/model-avro/NOTICE diff --git a/incubator/validator-avro/NOTICE.template b/incubator/model-avro/NOTICE.template similarity index 100% rename from incubator/validator-avro/NOTICE.template rename to incubator/model-avro/NOTICE.template diff --git a/incubator/validator-avro/mvnw b/incubator/model-avro/mvnw similarity index 100% rename from incubator/validator-avro/mvnw rename to incubator/model-avro/mvnw diff --git a/incubator/validator-avro/mvnw.cmd b/incubator/model-avro/mvnw.cmd similarity index 100% rename from incubator/validator-avro/mvnw.cmd rename to incubator/model-avro/mvnw.cmd diff --git a/incubator/validator-avro/pom.xml b/incubator/model-avro/pom.xml similarity index 87% rename from incubator/validator-avro/pom.xml rename to incubator/model-avro/pom.xml index 87e8b62542..88026c65a4 100644 --- a/incubator/validator-avro/pom.xml +++ b/incubator/model-avro/pom.xml @@ -8,12 +8,12 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml - validator-avro - zilla::incubator::validator-avro + model-avro + zilla::incubator::model-avro @@ -26,14 +26,14 @@ 11 11 - 0.80 + 0.88 0 ${project.groupId} - validator-avro.spec + model-avro.spec ${project.version} provided @@ -83,7 +83,7 @@ ${project.version} core - io.aklivity.zilla.runtime.validator.avro.internal.types + io.aklivity.zilla.runtime.model.avro.internal.types @@ -120,16 +120,16 @@ ${project.groupId} - validator-avro.spec + model-avro.spec - ^\Qio/aklivity/zilla/specs/validator/avro/\E - io/aklivity/zilla/runtime/validator/avro/ + ^\Qio/aklivity/zilla/specs/model/avro/\E + io/aklivity/zilla/runtime/model/avro/internal/ - io/aklivity/zilla/specs/validator/avro/schema/avro.schema.patch.json + io/aklivity/zilla/specs/model/avro/schema/avro.schema.patch.json ${project.build.directory}/classes @@ -153,7 +153,7 @@ org.apache.avro - io.aklivity.zilla.runtime.validator.avro.internal.avro + io.aklivity.zilla.runtime.model.avro.internal.avro true @@ -187,7 +187,7 @@ jacoco-maven-plugin - io/aklivity/zilla/runtime/validator/avro/internal/types/**/*.class + 
io/aklivity/zilla/runtime/model/avro/internal/types/**/*.class diff --git a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfig.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfig.java new file mode 100644 index 0000000000..57005925db --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfig.java @@ -0,0 +1,48 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.avro.config; + +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; + +public final class AvroModelConfig extends ModelConfig +{ + public final String subject; + public final String view; + + public AvroModelConfig( + List cataloged, + String subject, + String view) + { + super("avro", cataloged); + this.subject = subject; + this.view = view; + } + + public static AvroModelConfigBuilder builder( + Function mapper) + { + return new AvroModelConfigBuilder<>(mapper::apply); + } + + public static AvroModelConfigBuilder builder() + { + return new AvroModelConfigBuilder<>(AvroModelConfig.class::cast); + } +} diff --git a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigBuilder.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfigBuilder.java similarity index 59% rename from incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigBuilder.java rename to incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfigBuilder.java index c1596da4cc..476bc23ee8 100644 --- a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigBuilder.java +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/config/AvroModelConfigBuilder.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.validator.json.config; +package io.aklivity.zilla.runtime.model.avro.config; import java.util.LinkedList; import java.util.List; @@ -22,31 +22,47 @@ import io.aklivity.zilla.runtime.engine.config.CatalogedConfigBuilder; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public class JsonValidatorConfigBuilder extends ConfigBuilder> +public class AvroModelConfigBuilder extends ConfigBuilder> { - private final Function mapper; + private final Function mapper; private List catalogs; + private String subject; + private String view; - JsonValidatorConfigBuilder( - Function mapper) + AvroModelConfigBuilder( + Function mapper) { this.mapper = mapper; } @Override @SuppressWarnings("unchecked") - protected Class> thisType() + protected Class> thisType() { - return (Class>) getClass(); + return (Class>) getClass(); } - public CatalogedConfigBuilder> catalog() + public AvroModelConfigBuilder subject( + String subject) + { + this.subject = subject; + return this; + } + + public AvroModelConfigBuilder view( + String view) + { + this.view = view; + return this; + } + + public CatalogedConfigBuilder> catalog() { return CatalogedConfig.builder(this::catalog); } - public JsonValidatorConfigBuilder catalog( + public AvroModelConfigBuilder catalog( CatalogedConfig catalog) { if (catalogs == null) @@ -60,6 +76,6 @@ public JsonValidatorConfigBuilder catalog( @Override public T build() { - return mapper.apply(new JsonValidatorConfig(catalogs)); + return mapper.apply(new AvroModelConfig(catalogs, subject, view)); } } diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfig.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModel.java similarity index 50% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfig.java rename to incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModel.java index 5deba72254..4f7fb5f910 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfig.java +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModel.java @@ -12,27 +12,34 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.avro.internal; -import java.util.function.Function; +import java.net.URL; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; -public class LongValidatorConfig extends ValidatorConfig +public class AvroModel implements Model { - public LongValidatorConfig() + public static final String NAME = "avro"; + + @Override + public String name() { - super("long"); + return NAME; } - public static LongValidatorConfigBuilder builder( - Function mapper) + @Override + public ModelContext supply( + EngineContext context) { - return new LongValidatorConfigBuilder<>(mapper::apply); + return new AvroModelContext(context); } - public static LongValidatorConfigBuilder builder() + @Override + public URL type() { - return new LongValidatorConfigBuilder<>(LongValidatorConfig.class::cast); + return getClass().getResource("schema/avro.schema.patch.json"); } } diff --git a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelContext.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelContext.java new file mode 100644 index 0000000000..fa4a25232c --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelContext.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import java.util.function.LongFunction; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroModelContext implements ModelContext +{ + private final LongFunction supplyCatalog; + + public AvroModelContext( + EngineContext context) + { + this.supplyCatalog = context::supplyCatalog; + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return new AvroReadConverterHandler(AvroModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return new AvroWriteConverterHandler(AvroModelConfig.class.cast(config), supplyCatalog); + } +} diff --git a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactory.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpi.java similarity index 51% rename from incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactory.java rename to incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpi.java index 982d366907..52e851123e 100644 --- a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactory.java +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpi.java @@ -12,26 +12,22 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.validator.avro; +package io.aklivity.zilla.runtime.model.avro.internal; import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; import io.aklivity.zilla.runtime.common.feature.Incubating; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; -import io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfig; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; @Incubating -public final class AvroValidatorFactory implements ValidatorFactorySpi +public final class AvroModelFactorySpi implements ModelFactorySpi { @Override public String type() { - return "avro"; + return AvroModel.NAME; } public URL schema() @@ -40,11 +36,9 @@ public URL schema() } @Override - public Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) + public Model create( + Configuration config) { - return new AvroValidator(AvroValidatorConfig.class.cast(config), resolveId, supplyCatalog); + return new AvroModel(); } } diff --git a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelHandler.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelHandler.java new file mode 100644 index 0000000000..184cfd10e5 --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelHandler.java @@ -0,0 +1,220 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.ExpandableDirectByteBuffer; +import org.agrona.collections.Int2IntHashMap; +import org.agrona.collections.Int2ObjectCache; +import org.agrona.io.DirectBufferInputStream; +import org.agrona.io.ExpandableDirectBufferOutputStream; +import org.apache.avro.AvroRuntimeException; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericDatumWriter; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.BinaryEncoder; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.io.EncoderFactory; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public abstract class AvroModelHandler +{ + protected static final String VIEW_JSON = "json"; + + private static final InputStream EMPTY_INPUT_STREAM = new ByteArrayInputStream(new byte[0]); + private static final OutputStream EMPTY_OUTPUT_STREAM = new ByteArrayOutputStream(0); + private static final int JSON_FIELD_STRUCTURE_LENGTH = "\"\":\"\",".length(); + + protected final SchemaConfig catalog; + protected final CatalogHandler handler; + protected final DecoderFactory decoderFactory; + protected final EncoderFactory encoderFactory; + protected final BinaryDecoder decoder; + protected final BinaryEncoder encoder; + protected final String subject; + protected final String view; + protected final ExpandableDirectBufferOutputStream expandable; + protected final DirectBufferInputStream in; + + private final Int2ObjectCache schemas; + private final Int2ObjectCache> readers; + private final Int2ObjectCache> writers; + private final Int2ObjectCache records; + private final Int2IntHashMap paddings; + + protected AvroModelHandler( + AvroModelConfig config, + LongFunction supplyCatalog) + { + this.decoderFactory = DecoderFactory.get(); + this.decoder = decoderFactory.binaryDecoder(EMPTY_INPUT_STREAM, null); + this.encoderFactory = EncoderFactory.get(); + this.encoder = encoderFactory.binaryEncoder(EMPTY_OUTPUT_STREAM, null); + CatalogedConfig cataloged = config.cataloged.get(0); + this.handler = supplyCatalog.apply(cataloged.id); + this.catalog = cataloged.schemas.size() != 0 ? cataloged.schemas.get(0) : null; + this.view = config.view; + this.subject = catalog != null && catalog.subject != null + ? 
catalog.subject + : config.subject; + this.schemas = new Int2ObjectCache<>(1, 1024, i -> {}); + this.readers = new Int2ObjectCache<>(1, 1024, i -> {}); + this.writers = new Int2ObjectCache<>(1, 1024, i -> {}); + this.records = new Int2ObjectCache<>(1, 1024, i -> {}); + this.paddings = new Int2IntHashMap(-1); + this.expandable = new ExpandableDirectBufferOutputStream(new ExpandableDirectByteBuffer()); + this.in = new DirectBufferInputStream(); + } + + protected final boolean validate( + int schemaId, + DirectBuffer buffer, + int index, + int length) + { + boolean status = false; + try + { + GenericRecord record = supplyRecord(schemaId); + in.wrap(buffer, index, length); + GenericDatumReader reader = supplyReader(schemaId); + if (reader != null) + { + reader.read(record, decoderFactory.binaryDecoder(in, decoder)); + status = true; + } + } + catch (IOException | AvroRuntimeException ex) + { + ex.printStackTrace(); + } + return status; + } + + protected final Schema supplySchema( + int schemaId) + { + return schemas.computeIfAbsent(schemaId, this::resolveSchema); + } + + protected final int supplyPadding( + int schemaId) + { + return paddings.computeIfAbsent(schemaId, id -> calculatePadding(supplySchema(id))); + } + + protected final GenericDatumReader supplyReader( + int schemaId) + { + return readers.computeIfAbsent(schemaId, this::createReader); + } + + protected final GenericDatumWriter supplyWriter( + int schemaId) + { + return writers.computeIfAbsent(schemaId, this::createWriter); + } + + protected final GenericRecord supplyRecord( + int schemaId) + { + return records.computeIfAbsent(schemaId, this::createRecord); + } + + private GenericDatumReader createReader( + int schemaId) + { + Schema schema = supplySchema(schemaId); + GenericDatumReader reader = null; + if (schema != null) + { + reader = new GenericDatumReader(schema); + } + return reader; + } + + private GenericDatumWriter createWriter( + int schemaId) + { + Schema schema = supplySchema(schemaId); + GenericDatumWriter writer = null; + if (schema != null) + { + writer = new GenericDatumWriter(schema); + } + return writer; + } + + private GenericRecord createRecord( + int schemaId) + { + Schema schema = supplySchema(schemaId); + GenericRecord record = null; + if (schema != null) + { + record = new GenericData.Record(schema); + } + return record; + } + + private Schema resolveSchema( + int schemaId) + { + Schema schema = null; + String schemaText = handler.resolve(schemaId); + if (schemaText != null) + { + schema = new Schema.Parser().parse(schemaText); + } + return schema; + } + + private int calculatePadding( + Schema schema) + { + int padding = 0; + + if (schema != null) + { + padding = 2; + for (Schema.Field field : schema.getFields()) + { + if (field.schema().getType().equals(Schema.Type.RECORD)) + { + padding += calculatePadding(field.schema()); + } + else + { + padding += field.name().getBytes().length + JSON_FIELD_STRUCTURE_LENGTH; + } + } + } + return padding; + } +} diff --git a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroReadConverterHandler.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroReadConverterHandler.java new file mode 100644 index 0000000000..3518cd39fd --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroReadConverterHandler.java @@ -0,0 +1,147 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * 
this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import static io.aklivity.zilla.runtime.engine.catalog.CatalogHandler.NO_SCHEMA_ID; + +import java.io.IOException; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.apache.avro.AvroRuntimeException; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericDatumWriter; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.JsonEncoder; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroReadConverterHandler extends AvroModelHandler implements ConverterHandler +{ + public AvroReadConverterHandler( + AvroModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + int padding = 0; + if (VIEW_JSON.equals(view)) + { + int schemaId = handler.resolve(data, index, length); + + if (schemaId == NO_SCHEMA_ID) + { + if (catalog.id != NO_SCHEMA_ID) + { + schemaId = catalog.id; + } + else + { + schemaId = handler.resolve(subject, catalog.version); + } + } + padding = supplyPadding(schemaId); + } + return padding; + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + return handler.decode(data, index, length, next, this::decodePayload); + } + + private int decodePayload( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + if (schemaId == NO_SCHEMA_ID) + { + if (catalog.id != NO_SCHEMA_ID) + { + schemaId = catalog.id; + } + else + { + schemaId = handler.resolve(subject, catalog.version); + } + } + + if (VIEW_JSON.equals(view)) + { + deserializeRecord(schemaId, data, index, length); + int recordLength = expandable.position(); + if (recordLength > 0) + { + next.accept(expandable.buffer(), 0, recordLength); + valLength = recordLength; + } + } + else if (validate(schemaId, data, index, length)) + { + next.accept(data, index, length); + valLength = length; + } + return valLength; + } + + private void deserializeRecord( + int schemaId, + DirectBuffer buffer, + int index, + int length) + { + try + { + GenericDatumReader reader = supplyReader(schemaId); + GenericDatumWriter writer = supplyWriter(schemaId); + if (reader != null) + { + GenericRecord record = supplyRecord(schemaId); + in.wrap(buffer, index, length); + expandable.wrap(expandable.buffer()); + record = reader.read(record, decoderFactory.binaryDecoder(in, decoder)); + Schema schema = record.getSchema(); + JsonEncoder out = encoderFactory.jsonEncoder(schema, expandable); + writer.write(record, out); + out.flush(); + } + } + catch (IOException | AvroRuntimeException ex) + { + ex.printStackTrace(); + } + } +} diff --git 
a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroWriteConverterHandler.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroWriteConverterHandler.java new file mode 100644 index 0000000000..31f1bff334 --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/AvroWriteConverterHandler.java @@ -0,0 +1,104 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import java.io.IOException; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.apache.avro.AvroRuntimeException; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericDatumWriter; +import org.apache.avro.generic.GenericRecord; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroWriteConverterHandler extends AvroModelHandler implements ConverterHandler +{ + public AvroWriteConverterHandler( + AvroModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + return handler.encodePadding(); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + int schemaId = catalog != null && catalog.id > 0 + ? 
catalog.id + : handler.resolve(subject, catalog.version); + + if (VIEW_JSON.equals(view)) + { + valLength = handler.encode(schemaId, data, index, length, next, this::serializeJsonRecord); + } + else if (validate(schemaId, data, index, length)) + { + valLength = handler.encode(schemaId, data, index, length, next, CatalogHandler.Encoder.IDENTITY); + } + return valLength; + } + + private int serializeJsonRecord( + int schemaId, + DirectBuffer buffer, + int index, + int length, + ValueConsumer next) + { + try + { + Schema schema = supplySchema(schemaId); + GenericDatumReader reader = supplyReader(schemaId); + GenericDatumWriter writer = supplyWriter(schemaId); + if (reader != null) + { + GenericRecord record = supplyRecord(schemaId); + in.wrap(buffer, index, length); + expandable.wrap(expandable.buffer()); + record = reader.read(record, decoderFactory.jsonDecoder(schema, in)); + encoderFactory.binaryEncoder(expandable, encoder); + writer.write(record, encoder); + encoder.flush(); + next.accept(expandable.buffer(), 0, expandable.position()); + } + } + catch (IOException | AvroRuntimeException ex) + { + ex.printStackTrace(); + } + return expandable.position(); + } +} diff --git a/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapter.java b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapter.java new file mode 100644 index 0000000000..f635804a60 --- /dev/null +++ b/incubator/model-avro/src/main/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapter.java @@ -0,0 +1,114 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.avro.internal.config; + +import java.util.LinkedList; +import java.util.List; + +import jakarta.json.Json; +import jakarta.json.JsonArray; +import jakarta.json.JsonArrayBuilder; +import jakarta.json.JsonObject; +import jakarta.json.JsonObjectBuilder; +import jakarta.json.JsonValue; +import jakarta.json.bind.adapter.JsonbAdapter; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public final class AvroModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter +{ + private static final String AVRO = "avro"; + private static final String MODEL_NAME = "model"; + private static final String CATALOG_NAME = "catalog"; + private static final String SUBJECT_NAME = "subject"; + private static final String VIEW = "view"; + + private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); + + @Override + public String type() + { + return AVRO; + } + + @Override + public JsonValue adaptToJson( + ModelConfig config) + { + AvroModelConfig converterConfig = (AvroModelConfig) config; + JsonObjectBuilder converter = Json.createObjectBuilder(); + + if (converterConfig.view != null) + { + converter.add(VIEW, converterConfig.view); + } + + converter.add(MODEL_NAME, AVRO); + if (converterConfig.cataloged != null && !converterConfig.cataloged.isEmpty()) + { + JsonObjectBuilder catalogs = Json.createObjectBuilder(); + for (CatalogedConfig catalog : converterConfig.cataloged) + { + JsonArrayBuilder array = Json.createArrayBuilder(); + for (SchemaConfig schemaItem: catalog.schemas) + { + array.add(schema.adaptToJson(schemaItem)); + } + catalogs.add(catalog.name, array); + } + converter.add(CATALOG_NAME, catalogs); + } + return converter.build(); + } + + @Override + public ModelConfig adaptFromJson( + JsonValue value) + { + JsonObject object = (JsonObject) value; + + assert object.containsKey(CATALOG_NAME); + + JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME); + List catalogs = new LinkedList<>(); + for (String catalogName: catalogsJson.keySet()) + { + JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); + List schemas = new LinkedList<>(); + for (JsonValue item : schemasJson) + { + JsonObject schemaJson = (JsonObject) item; + SchemaConfig schemaElement = schema.adaptFromJson(schemaJson); + schemas.add(schemaElement); + } + catalogs.add(new CatalogedConfig(catalogName, schemas)); + } + + String subject = object.containsKey(SUBJECT_NAME) + ? object.getString(SUBJECT_NAME) + : null; + + String view = object.containsKey(VIEW) + ? object.getString(VIEW) + : null; + + return new AvroModelConfig(catalogs, subject, view); + } +} diff --git a/incubator/validator-avro/src/main/moditect/module-info.java b/incubator/model-avro/src/main/moditect/module-info.java similarity index 55% rename from incubator/validator-avro/src/main/moditect/module-info.java rename to incubator/model-avro/src/main/moditect/module-info.java index 058ec63edf..1bae61c43e 100644 --- a/incubator/validator-avro/src/main/moditect/module-info.java +++ b/incubator/model-avro/src/main/moditect/module-info.java @@ -12,21 +12,21 @@ * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations under the License. */ -module io.aklivity.zilla.runtime.validator.avro +module io.aklivity.zilla.runtime.model.avro { requires com.fasterxml.jackson.core; requires com.fasterxml.jackson.databind; requires org.slf4j; requires io.aklivity.zilla.runtime.engine; - exports io.aklivity.zilla.runtime.validator.avro.config; + exports io.aklivity.zilla.runtime.model.avro.config; - uses io.aklivity.zilla.runtime.validator.avro.internal.avro.Conversion; - uses io.aklivity.zilla.runtime.validator.avro.internal.avro.LogicalTypes$LogicalTypeFactory; + uses io.aklivity.zilla.runtime.model.avro.internal.avro.Conversion; + uses io.aklivity.zilla.runtime.model.avro.internal.avro.LogicalTypes$LogicalTypeFactory; - provides io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi - with io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfigAdapter; + provides io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi + with io.aklivity.zilla.runtime.model.avro.internal.config.AvroModelConfigAdapter; - provides io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi - with io.aklivity.zilla.runtime.validator.avro.AvroValidatorFactory; + provides io.aklivity.zilla.runtime.engine.model.ModelFactorySpi + with io.aklivity.zilla.runtime.model.avro.internal.AvroModelFactorySpi; } diff --git a/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi b/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi new file mode 100644 index 0000000000..4bab78031f --- /dev/null +++ b/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.avro.internal.config.AvroModelConfigAdapter diff --git a/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi b/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi new file mode 100644 index 0000000000..2e6820134a --- /dev/null +++ b/incubator/model-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.avro.internal.AvroModelFactorySpi diff --git a/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpiTest.java b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpiTest.java new file mode 100644 index 0000000000..5e89a00f20 --- /dev/null +++ b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelFactorySpiTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
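The META-INF/services entries and the module-info provides clauses above register the renamed AvroModelFactorySpi for java.util.ServiceLoader discovery on the class path and module path alike; ModelFactory.instantiate(), used in the test that follows, presumably rides the same mechanism. A minimal sketch of the raw lookup (the class name is illustrative):

import java.util.ServiceLoader;

import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi;

public class SpiLookupSketch
{
    public static void main(String[] args)
    {
        // each provider names its model; "avro" appears once model-avro is on the path
        for (ModelFactorySpi spi : ServiceLoader.load(ModelFactorySpi.class))
        {
            System.out.println(spi.type());
        }
    }
}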
+ */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroModelFactorySpiTest +{ + @Test + public void shouldLoadAndCreate() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("avro", config); + + ModelContext context = new AvroModelContext(mock(EngineContext.class)); + + ModelConfig modelConfig = AvroModelConfig.builder() + .subject("test-value") + .catalog() + .name("test0") + .schema() + .subject("subject1") + .version("latest") + .build() + .build() + .build(); + + assertThat(model, instanceOf(AvroModel.class)); + assertThat(context.supplyReadConverterHandler(modelConfig), instanceOf(AvroReadConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(modelConfig), instanceOf(AvroWriteConverterHandler.class)); + } +} diff --git a/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelTest.java b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelTest.java new file mode 100644 index 0000000000..5c56de4336 --- /dev/null +++ b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/AvroModelTest.java @@ -0,0 +1,240 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
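The byte arrays in the AvroModelTest that follows are hand-rolled Avro binary: each string field is a zig-zag varint length prefix followed by UTF-8 bytes, so 0x06 introduces the 3-byte "id0" and 0x10 the 8-byte "positive". A sketch of that length encoding (lengths up to 63 fit in a single varint byte; the class name is illustrative):

public class ZigZagSketch
{
    // Avro's zig-zag mapping for int: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
    static int zigZag(int n)
    {
        return (n << 1) ^ (n >> 31);
    }

    public static void main(String[] args)
    {
        assert zigZag(3) == 0x06;  // length prefix of "id0"
        assert zigZag(8) == 0x10;  // length prefix of "positive"
    }
}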
+ */ +package io.aklivity.zilla.runtime.model.avro.internal; + +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import java.util.Properties; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.Catalog; +import io.aklivity.zilla.runtime.engine.catalog.CatalogContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogConfig; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalog; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroModelTest +{ + private static final String SCHEMA = "{\"fields\":[{\"name\":\"id\",\"type\":\"string\"}," + + "{\"name\":\"status\",\"type\":\"string\"}]," + + "\"name\":\"Event\",\"namespace\":\"io.aklivity.example\",\"type\":\"record\"}"; + + private final AvroModelConfig avroConfig = AvroModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + private CatalogContext context; + + @Before + public void init() + { + Properties properties = new Properties(); + properties.setProperty(ENGINE_DIRECTORY.name(), "target/zilla-itests"); + Configuration config = new Configuration(properties); + Catalog catalog = new TestCatalog(config); + context = catalog.supply(mock(EngineContext.class)); + } + + @Test + public void shouldVerifyValidAvroEvent() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroReadConverterHandler converter = new AvroReadConverterHandler(avroConfig, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldWriteValidAvroEvent() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroWriteConverterHandler converter = new AvroWriteConverterHandler(avroConfig, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, 0x30, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidAvroEvent() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroReadConverterHandler converter = new 
AvroReadConverterHandler(avroConfig, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, 0x30, 0x10}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldReadAvroEventExpectJson() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroModelConfig config = AvroModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + AvroReadConverterHandler converter = new AvroReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + String json = + "{" + + "\"id\":\"id0\"," + + "\"status\":\"positive\"" + + "}"; + + DirectBuffer expected = new UnsafeBuffer(); + expected.wrap(json.getBytes(), 0, json.getBytes().length); + + int progress = converter.convert(data, 0, data.capacity(), ValueConsumer.NOP); + assertEquals(expected.capacity(), progress); + + assertEquals(expected.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldWriteJsonEventExpectAvro() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroModelConfig config = AvroModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + AvroWriteConverterHandler converter = new AvroWriteConverterHandler(config, handler); + + DirectBuffer expected = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + expected.wrap(bytes, 0, bytes.length); + + String payload = + "{" + + "\"id\":\"id0\"," + + "\"status\":\"positive\"" + + "}"; + + DirectBuffer data = new UnsafeBuffer(); + data.wrap(payload.getBytes(), 0, payload.getBytes().length); + int progress = converter.convert(data, 0, data.capacity(), ValueConsumer.NOP); + assertEquals(expected.capacity(), progress); + + assertEquals(expected.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyPaddingLength() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + AvroModelConfig config = AvroModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + AvroReadConverterHandler converter = new AvroReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x06, 0x69, 0x64, + 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; + data.wrap(bytes, 0, bytes.length); + + assertEquals(22, converter.padding(data, 0, data.capacity())); + + } +} diff --git 
a/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapterTest.java b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapterTest.java new file mode 100644 index 0000000000..94af0fe895 --- /dev/null +++ b/incubator/model-avro/src/test/java/io/aklivity/zilla/runtime/model/avro/internal/config/AvroModelConfigAdapterTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.avro.internal.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; + +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.model.avro.config.AvroModelConfig; + +public class AvroModelConfigAdapterTest +{ + private Jsonb jsonb; + + @Before + public void initJson() + { + JsonbConfig config = new JsonbConfig() + .withAdapters(new AvroModelConfigAdapter()); + jsonb = JsonbBuilder.create(config); + } + + @Test + public void shouldReadAvroconverter() + { + // GIVEN + String json = + "{" + + "\"view\":\"json\"," + + "\"model\": \"avro\"," + + "\"catalog\":" + + "{" + + "\"test0\":" + + "[" + + "{" + + "\"strategy\": \"topic\"," + + "\"version\": \"latest\"" + + "}," + + "{" + + "\"subject\": \"cat\"," + + "\"version\": \"latest\"" + + "}," + + "{" + + "\"id\": 42" + + "}" + + "]" + + "}" + + "}"; + + // WHEN + AvroModelConfig converter = jsonb.fromJson(json, AvroModelConfig.class); + + // THEN + assertThat(converter, not(nullValue())); + assertThat(converter.view, equalTo("json")); + assertThat(converter.model, equalTo("avro")); + assertThat(converter.cataloged.size(), equalTo(1)); + assertThat(converter.cataloged.get(0).name, equalTo("test0")); + assertThat(converter.cataloged.get(0).schemas.get(0).strategy, equalTo("topic")); + assertThat(converter.cataloged.get(0).schemas.get(0).version, equalTo("latest")); + assertThat(converter.cataloged.get(0).schemas.get(0).id, equalTo(0)); + assertThat(converter.cataloged.get(0).schemas.get(1).subject, equalTo("cat")); + assertThat(converter.cataloged.get(0).schemas.get(1).strategy, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(1).version, equalTo("latest")); + assertThat(converter.cataloged.get(0).schemas.get(1).id, equalTo(0)); + assertThat(converter.cataloged.get(0).schemas.get(2).strategy, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(2).version, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(2).id, equalTo(42)); + } + + @Test + public void shouldWriteAvroconverter() + { + // GIVEN + String expectedJson = + "{" + + "\"view\":\"json\"," + + "\"model\":\"avro\"," + + "\"catalog\":" + + "{" + + "\"test0\":" + + "[" + + "{" + + 
"\"strategy\":\"topic\"," + + "\"version\":\"latest\"" + + "}," + + "{" + + "\"subject\":\"cat\"," + + "\"version\":\"latest\"" + + "}," + + "{" + + "\"id\":42" + + "}" + + "]" + + "}" + + "}"; + AvroModelConfig converter = AvroModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .build() + .schema() + .subject("cat") + .version("latest") + .build() + .schema() + .id(42) + .build() + .build() + .build(); + + // WHEN + String json = jsonb.toJson(converter); + + // THEN + assertThat(json, not(nullValue())); + assertThat(json, equalTo(expectedJson)); + } +} diff --git a/incubator/validator-core.spec/COPYRIGHT b/incubator/model-core.spec/COPYRIGHT similarity index 100% rename from incubator/validator-core.spec/COPYRIGHT rename to incubator/model-core.spec/COPYRIGHT diff --git a/incubator/validator-core.spec/LICENSE b/incubator/model-core.spec/LICENSE similarity index 100% rename from incubator/validator-core.spec/LICENSE rename to incubator/model-core.spec/LICENSE diff --git a/incubator/validator-core.spec/NOTICE b/incubator/model-core.spec/NOTICE similarity index 100% rename from incubator/validator-core.spec/NOTICE rename to incubator/model-core.spec/NOTICE diff --git a/incubator/validator-core.spec/NOTICE.template b/incubator/model-core.spec/NOTICE.template similarity index 100% rename from incubator/validator-core.spec/NOTICE.template rename to incubator/model-core.spec/NOTICE.template diff --git a/incubator/validator-core.spec/mvnw b/incubator/model-core.spec/mvnw similarity index 100% rename from incubator/validator-core.spec/mvnw rename to incubator/model-core.spec/mvnw diff --git a/incubator/validator-core.spec/mvnw.cmd b/incubator/model-core.spec/mvnw.cmd similarity index 100% rename from incubator/validator-core.spec/mvnw.cmd rename to incubator/model-core.spec/mvnw.cmd diff --git a/incubator/validator-core.spec/pom.xml b/incubator/model-core.spec/pom.xml similarity index 93% rename from incubator/validator-core.spec/pom.xml rename to incubator/model-core.spec/pom.xml index ee32d6be74..a6d8437d3a 100644 --- a/incubator/validator-core.spec/pom.xml +++ b/incubator/model-core.spec/pom.xml @@ -8,12 +8,12 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml - validator-core.spec - zilla::incubator::validator-core.spec + model-core.spec + zilla::incubator::model-core.spec @@ -79,7 +79,7 @@ ${project.version} core - io.aklivity.zilla.specs.validator.core.internal.types + io.aklivity.zilla.specs.model.core.internal.types @@ -135,7 +135,7 @@ jacoco-maven-plugin - io/aklivity/zilla/specs/validator/core/internal/types/**/*.class + io/aklivity/zilla/specs/model/core/internal/types/**/*.class diff --git a/incubator/validator-core.spec/src/main/moditect/module-info.java b/incubator/model-core.spec/src/main/moditect/module-info.java similarity index 92% rename from incubator/validator-core.spec/src/main/moditect/module-info.java rename to incubator/model-core.spec/src/main/moditect/module-info.java index 89567c9647..71b092d9a6 100644 --- a/incubator/validator-core.spec/src/main/moditect/module-info.java +++ b/incubator/model-core.spec/src/main/moditect/module-info.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -open module io.aklivity.zilla.specs.validator.core +open module io.aklivity.zilla.specs.model.core { requires transitive io.aklivity.zilla.specs.engine; } diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/config/string.model.yaml similarity index 54% rename from specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt rename to incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/config/string.model.yaml index 4fae513f0c..83777f49cf 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt +++ b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/config/string.model.yaml @@ -13,19 +13,14 @@ # specific language governing permissions and limitations under the License. # -accept "zilla://streams/mqtt0" - option zilla:window 8192 - option zilla:transmission "duplex" -accepted - -read zilla:begin.ext ${mqtt:matchBeginEx() - .typeId(zilla:id("mqtt")) - .publish() - .clientId("client") - .topic("sensor/one") - .build() - .build()} - -connected - -write advise zilla:flush +--- +name: test +bindings: + test: + kind: server + type: test + options: + value: + model: string + encoding: utf_8 + exit: test diff --git a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/integer.schema.patch.json b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/integer.schema.patch.json similarity index 50% rename from incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/integer.schema.patch.json rename to incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/integer.schema.patch.json index 4e671fa357..6f19753e68 100644 --- a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/integer.schema.patch.json +++ b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/integer.schema.patch.json @@ -1,4 +1,9 @@ [ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "integer" + }, { "op": "add", "path": "/$defs/validator/types/enum/-", diff --git a/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/string.schema.patch.json b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/string.schema.patch.json new file mode 100644 index 0000000000..80ad373a2f --- /dev/null +++ b/incubator/model-core.spec/src/main/scripts/io/aklivity/zilla/specs/model/core/schema/string.schema.patch.json @@ -0,0 +1,85 @@ +[ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "string" + }, + { + "op": "add", + "path": "/$defs/converter/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "string" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "string" + }, + "encoding": + { + "type": "string", + "enum": + [ + "utf_8", + "utf_16" + ] + } + }, + "additionalProperties": false + } + } + }, + { + "op": "add", + "path": "/$defs/validator/types/enum/-", + "value": "string" + }, + { + "op": "add", + "path": "/$defs/validator/allOf/-", + "value": + { + "if": + { + "properties": + { 
+ "model": + { + "const": "string" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "string" + }, + "encoding": + { + "type": "string", + "enum": + [ + "utf_8" + ] + } + }, + "additionalProperties": false + } + } + } +] diff --git a/incubator/model-core.spec/src/test/java/io/aklivity/zilla/specs/model/core/config/SchemaTest.java b/incubator/model-core.spec/src/test/java/io/aklivity/zilla/specs/model/core/config/SchemaTest.java new file mode 100644 index 0000000000..e8895daca3 --- /dev/null +++ b/incubator/model-core.spec/src/test/java/io/aklivity/zilla/specs/model/core/config/SchemaTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.specs.model.core.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.JsonObject; + +import org.junit.Rule; +import org.junit.Test; + +import io.aklivity.zilla.specs.engine.config.ConfigSchemaRule; + +public class SchemaTest +{ + @Rule + public final ConfigSchemaRule schema = new ConfigSchemaRule() + .schemaPatch("io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/model/core/schema/string.schema.patch.json") + .configurationRoot("io/aklivity/zilla/specs/model/core/config"); + + @Test + public void shouldValidateCatalog() + { + JsonObject config = schema.validate("string.model.yaml"); + + assertThat(config, not(nullValue())); + } +} diff --git a/incubator/validator-core/COPYRIGHT b/incubator/model-core/COPYRIGHT similarity index 100% rename from incubator/validator-core/COPYRIGHT rename to incubator/model-core/COPYRIGHT diff --git a/incubator/validator-core/LICENSE b/incubator/model-core/LICENSE similarity index 100% rename from incubator/validator-core/LICENSE rename to incubator/model-core/LICENSE diff --git a/incubator/validator-core/NOTICE b/incubator/model-core/NOTICE similarity index 100% rename from incubator/validator-core/NOTICE rename to incubator/model-core/NOTICE diff --git a/incubator/validator-core/NOTICE.template b/incubator/model-core/NOTICE.template similarity index 100% rename from incubator/validator-core/NOTICE.template rename to incubator/model-core/NOTICE.template diff --git a/incubator/validator-core/mvnw b/incubator/model-core/mvnw similarity index 100% rename from incubator/validator-core/mvnw rename to incubator/model-core/mvnw diff --git a/incubator/validator-core/mvnw.cmd b/incubator/model-core/mvnw.cmd similarity index 100% rename from incubator/validator-core/mvnw.cmd rename to incubator/model-core/mvnw.cmd diff --git a/incubator/validator-core/pom.xml b/incubator/model-core/pom.xml similarity index 87% rename from incubator/validator-core/pom.xml rename to incubator/model-core/pom.xml index 22c385aed1..2adb8d1ee8 100644 --- a/incubator/validator-core/pom.xml +++ b/incubator/model-core/pom.xml @@ -8,12 +8,12 @@ io.aklivity.zilla 
incubator - 0.9.66 + 0.9.67 ../pom.xml - validator-core - zilla::incubator::validator-core + model-core + zilla::incubator::model-core @@ -26,14 +26,14 @@ 11 11 - 0.70 + 0.83 0 ${project.groupId} - validator-core.spec + model-core.spec ${project.version} provided @@ -79,7 +79,7 @@ ${project.version} core - io.aklivity.zilla.runtime.validator.core.internal.types + io.aklivity.zilla.runtime.model.core.internal.types @@ -116,16 +116,16 @@ ${project.groupId} - validator-core.spec + model-core.spec - ^\Qio/aklivity/zilla/specs/validator/core/\E - io/aklivity/zilla/runtime/validator/core/ + ^\Qio/aklivity/zilla/specs/model/core/\E + io/aklivity/zilla/runtime/model/core/internal/ - io/aklivity/zilla/specs/validator/core/schema/*.schema.patch.json + io/aklivity/zilla/specs/model/core/schema/*.schema.patch.json ${project.build.directory}/classes @@ -155,7 +155,7 @@ jacoco-maven-plugin - io/aklivity/zilla/runtime/validator/core/internal/types/**/*.class + io/aklivity/zilla/runtime/model/core/internal/types/**/*.class diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfig.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfig.java similarity index 55% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfig.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfig.java index 27a4400fb4..436a1f59ca 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfig.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfig.java @@ -12,27 +12,27 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.config; import java.util.function.Function; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; -public class IntegerValidatorConfig extends ValidatorConfig +public class IntegerModelConfig extends ModelConfig { - public IntegerValidatorConfig() + public IntegerModelConfig() { super("integer"); } - public static IntegerValidatorConfigBuilder builder( - Function mapper) + public static IntegerModelConfigBuilder builder( + Function mapper) { - return new IntegerValidatorConfigBuilder<>(mapper::apply); + return new IntegerModelConfigBuilder<>(mapper::apply); } - public static IntegerValidatorConfigBuilder builder() + public static IntegerModelConfigBuilder builder() { - return new IntegerValidatorConfigBuilder<>(IntegerValidatorConfig.class::cast); + return new IntegerModelConfigBuilder<>(IntegerModelConfig.class::cast); } } diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigBuilder.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfigBuilder.java similarity index 63% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigBuilder.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfigBuilder.java index fc843c2da7..35f5061aec 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigBuilder.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/IntegerModelConfigBuilder.java @@ -12,32 +12,32 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
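The two builder() overloads above are what let these configs compose: the no-argument form casts the built config back to its own type, while the Function form hands it to an enclosing builder. A usage sketch; the diff elides the generic signatures, so this assumes the mapper accepts the base ModelConfig type:

import java.util.function.Function;

import io.aklivity.zilla.runtime.engine.config.ModelConfig;
import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig;

public class BuilderSketch
{
    public static void main(String[] args)
    {
        // standalone: the mapper is IntegerModelConfig.class::cast
        IntegerModelConfig config = IntegerModelConfig.builder().build();
        assert "integer".equals(config.model);

        // nested: a hypothetical enclosing builder receives the config via the mapper
        Function<ModelConfig, String> parent = c -> "model=" + c.model;
        String summary = IntegerModelConfig.builder(parent).build();
    }
}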
*/ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.config; import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public class LongValidatorConfigBuilder extends ConfigBuilder> +public class IntegerModelConfigBuilder extends ConfigBuilder> { - private final Function mapper; + private final Function mapper; - LongValidatorConfigBuilder( - Function mapper) + IntegerModelConfigBuilder( + Function mapper) { this.mapper = mapper; } @Override @SuppressWarnings("unchecked") - protected Class> thisType() + protected Class> thisType() { - return (Class>) getClass(); + return (Class>) getClass(); } @Override public T build() { - return mapper.apply(new LongValidatorConfig()); + return mapper.apply(new IntegerModelConfig()); } } diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfig.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfig.java similarity index 61% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfig.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfig.java index 7d2af19e7b..d18a80bc6d 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfig.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfig.java @@ -12,33 +12,33 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.config; import java.util.function.Function; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; -public final class StringValidatorConfig extends ValidatorConfig +public final class StringModelConfig extends ModelConfig { public static final String DEFAULT_ENCODING = "utf_8"; public final String encoding; - public StringValidatorConfig( + public StringModelConfig( String encoding) { super("string"); this.encoding = encoding != null ? 
encoding : DEFAULT_ENCODING; } - public static StringValidatorConfigBuilder builder( - Function mapper) + public static StringModelConfigBuilder builder( + Function mapper) { - return new StringValidatorConfigBuilder<>(mapper::apply); + return new StringModelConfigBuilder<>(mapper::apply); } - public static StringValidatorConfigBuilder builder() + public static StringModelConfigBuilder builder() { - return new StringValidatorConfigBuilder<>(StringValidatorConfig.class::cast); + return new StringModelConfigBuilder<>(StringModelConfig.class::cast); } } diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigBuilder.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfigBuilder.java similarity index 63% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigBuilder.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfigBuilder.java index ce700243eb..9ef65f5738 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigBuilder.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/config/StringModelConfigBuilder.java @@ -12,32 +12,32 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.config; import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public class StringValidatorConfigBuilder extends ConfigBuilder> +public class StringModelConfigBuilder extends ConfigBuilder> { - private final Function mapper; + private final Function mapper; private String encoding; - StringValidatorConfigBuilder( - Function mapper) + StringModelConfigBuilder( + Function mapper) { this.mapper = mapper; } @Override @SuppressWarnings("unchecked") - protected Class> thisType() + protected Class> thisType() { - return (Class>) getClass(); + return (Class>) getClass(); } - public StringValidatorConfigBuilder encoding( + public StringModelConfigBuilder encoding( String encoding) { this.encoding = encoding; @@ -47,6 +47,6 @@ public StringValidatorConfigBuilder encoding( @Override public T build() { - return mapper.apply(new StringValidatorConfig(encoding)); + return mapper.apply(new StringModelConfig(encoding)); } } diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidator.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterHandler.java similarity index 51% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidator.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterHandler.java index dc9b4fd84b..2fb1f89880 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidator.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterHandler.java @@ -12,43 +12,35 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
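The IntegerConverterHandler that follows illustrates the new ConverterHandler contract: convert() forwards valid bytes to a ValueConsumer and returns the consumed length, or -1 to reject. A driver sketch against it (payload bytes and class name are illustrative):

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;
import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig;
import io.aklivity.zilla.runtime.model.core.internal.IntegerConverterHandler;

public class ConvertSketch
{
    public static void main(String[] args)
    {
        IntegerConverterHandler handler =
            new IntegerConverterHandler(IntegerModelConfig.builder().build());

        DirectBuffer data = new UnsafeBuffer(new byte[] {0x00, 0x00, 0x00, 0x2a});
        assert handler.convert(data, 0, 4, ValueConsumer.NOP) == 4;   // exactly 4 bytes: accepted

        DirectBuffer bad = new UnsafeBuffer(new byte[] {0x2a});
        assert handler.convert(bad, 0, 1, ValueConsumer.NOP) == -1;   // wrong length: rejected
    }
}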
*/ -package io.aklivity.zilla.runtime.validator.core; +package io.aklivity.zilla.runtime.model.core.internal; import org.agrona.DirectBuffer; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; -public class LongValidator implements Validator +public class IntegerConverterHandler implements ConverterHandler { - public LongValidator( - LongValidatorConfig config) + public IntegerConverterHandler( + IntegerModelConfig config) { } @Override - public boolean read( + public int convert( DirectBuffer data, int index, - int length) + int length, + ValueConsumer next) { - return validate(data, index, length); - } + boolean valid = length == 4; - @Override - public boolean write( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } + if (valid) + { + next.accept(data, index, length); + } - private boolean validate( - DirectBuffer data, - int index, - int length) - { - return length == 8; + return valid ? length : -1; } } diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModel.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModel.java new file mode 100644 index 0000000000..58c214bafd --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModel.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import java.net.URL; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; + +public class IntegerModel implements Model +{ + public static final String NAME = "integer"; + + @Override + public String name() + { + return NAME; + } + + @Override + public ModelContext supply( + EngineContext context) + { + return new IntegerModelContext(context); + } + + @Override + public URL type() + { + return getClass().getResource("schema/integer.schema.patch.json"); + } +} diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelContext.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelContext.java new file mode 100644 index 0000000000..1320b7c7df --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelContext.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; + +public class IntegerModelContext implements ModelContext +{ + public IntegerModelContext( + EngineContext context) + { + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return supply(config); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return supply(config); + } + + @Override + public ValidatorHandler supplyValidatorHandler( + ModelConfig config) + { + return new IntegerValidatorHandler(IntegerModelConfig.class.cast(config)); + } + + private IntegerConverterHandler supply( + ModelConfig config) + { + return new IntegerConverterHandler(IntegerModelConfig.class.cast(config)); + } +} diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactory.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactorySpi.java similarity index 52% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactory.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactorySpi.java index da00c4fb58..1b50c0c260 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactory.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactorySpi.java @@ -12,26 +12,22 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
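The IntegerValidatorHandler introduced below validates fragmented payloads incrementally: FLAGS_INIT resets the expected byte count, intermediate fragments only decrement it, and FLAGS_FIN requires it to reach exactly zero, so no fragment is ever buffered. A sketch of a two-fragment delivery, using the FLAGS_* constants that ValidatorHandler declares (payload bytes are illustrative):

import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_FIN;
import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_INIT;

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;
import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig;
import io.aklivity.zilla.runtime.model.core.internal.IntegerValidatorHandler;

public class FragmentSketch
{
    public static void main(String[] args)
    {
        IntegerValidatorHandler handler =
            new IntegerValidatorHandler(IntegerModelConfig.builder().build());

        DirectBuffer head = new UnsafeBuffer(new byte[] {0x00, 0x00});
        DirectBuffer tail = new UnsafeBuffer(new byte[] {0x00, 0x2a});

        boolean valid = handler.validate(FLAGS_INIT, head, 0, 2, ValueConsumer.NOP) // 2 of 4 bytes
            && handler.validate(FLAGS_FIN, tail, 0, 2, ValueConsumer.NOP);          // final 2 bytes
        // valid == true only when the fragments sum to exactly 4 bytes
    }
}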
*/ -package io.aklivity.zilla.runtime.validator.core; +package io.aklivity.zilla.runtime.model.core.internal; import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; import io.aklivity.zilla.runtime.common.feature.Incubating; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; -import io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfig; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; @Incubating -public class IntegerValidatorFactory implements ValidatorFactorySpi +public class IntegerModelFactorySpi implements ModelFactorySpi { @Override public String type() { - return "integer"; + return IntegerModel.NAME; } @Override @@ -41,11 +37,9 @@ public URL schema() } @Override - public Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) + public Model create( + Configuration config) { - return new IntegerValidator(IntegerValidatorConfig.class.cast(config)); + return new IntegerModel(); } } diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorHandler.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorHandler.java new file mode 100644 index 0000000000..48dd67b542 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorHandler.java @@ -0,0 +1,59 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
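With ModelFactorySpi.create now taking only a Configuration, wiring a model end to end follows the same shape as AvroModelFactorySpiTest above. A sketch for the integer model; the EngineContext would come from the running engine, so a mock stands in here as it does in that test:

import static org.mockito.Mockito.mock;

import io.aklivity.zilla.runtime.engine.Configuration;
import io.aklivity.zilla.runtime.engine.EngineContext;
import io.aklivity.zilla.runtime.engine.model.Model;
import io.aklivity.zilla.runtime.engine.model.ModelContext;
import io.aklivity.zilla.runtime.engine.model.ModelFactory;
import io.aklivity.zilla.runtime.engine.model.ValidatorHandler;
import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig;

public class FactorySketch
{
    public static void main(String[] args)
    {
        Configuration config = new Configuration();
        ModelFactory factory = ModelFactory.instantiate();   // ServiceLoader discovery
        Model model = factory.create("integer", config);     // via IntegerModelFactorySpi

        ModelContext context = model.supply(mock(EngineContext.class));
        ValidatorHandler handler =
            context.supplyValidatorHandler(IntegerModelConfig.builder().build());
    }
}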
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; + +public class IntegerValidatorHandler implements ValidatorHandler +{ + private int pendingBytes; + + public IntegerValidatorHandler( + IntegerModelConfig config) + { + } + + @Override + public boolean validate( + int flags, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + boolean valid; + + if ((flags & FLAGS_INIT) != 0x00) + { + pendingBytes = 4; + } + + pendingBytes = pendingBytes - length; + + if ((flags & FLAGS_FIN) != 0x00) + { + valid = pendingBytes == 0; + } + else + { + valid = pendingBytes >= 0; + } + return valid; + } +} diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterHandler.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterHandler.java new file mode 100644 index 0000000000..4398a134a8 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterHandler.java @@ -0,0 +1,50 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringConverterHandler implements ConverterHandler +{ + private StringEncoding encoding; + + public StringConverterHandler( + StringModelConfig config) + { + this.encoding = StringEncoding.of(config.encoding); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + if (encoding.validate(data, index, length)) + { + next.accept(data, index, length); + valLength = length; + } + + return valLength; + } +} diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringEncoding.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringEncoding.java new file mode 100644 index 0000000000..d3fbda2281 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringEncoding.java @@ -0,0 +1,117 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import org.agrona.DirectBuffer; + +public enum StringEncoding +{ + UTF_8 + { + @Override + public boolean validate( + DirectBuffer data, + int index, + int length) + { + final int limit = index + length; + validate: + while (index < limit) + { + final int charByte0 = data.getByte(index); + final int charByteCount = (charByte0 & 0b1000_0000) != 0 + ? Integer.numberOfLeadingZeros((~charByte0 & 0xff) << 24) + : 1; + + final int charByteLimit = index + charByteCount; + for (int charByteIndex = index + 1; charByteIndex < charByteLimit; charByteIndex++) + { + if (charByteIndex >= limit || (data.getByte(charByteIndex) & 0b11000000) != 0b10000000) + { + break validate; + } + } + index += charByteCount; + } + return index == limit; + } + }, + + UTF_16 + { + @Override + public boolean validate( + DirectBuffer data, + int index, + int length) + { + final int limit = index + length; + + while (index < limit) + { + if (index == limit - 1) + { + break; + } + + int highByte = data.getByte(index) & 0xFF; + int lowByte = data.getByte(index + 1) & 0xFF; + int codeUnit = (highByte << 8) | lowByte; + + if (codeUnit >= 0xD800 && codeUnit <= 0xDBFF) + { + if (index + 3 >= limit) + { + break; + } + int secondHighByte = data.getByte(index + 2) & 0xFF; + int secondLowByte = data.getByte(index + 3) & 0xFF; + int secondCodeUnit = (secondHighByte << 8) | secondLowByte; + if (secondCodeUnit < 0xDC00 || secondCodeUnit > 0xDFFF) + { + break; + } + index += 4; + } + else if (codeUnit >= 0xDC00 && codeUnit <= 0xDFFF) + { + break; + } + else + { + index += 2; + } + } + return index == limit; + } + }; + + public abstract boolean validate( + DirectBuffer data, + int index, + int length); + + public static StringEncoding of( + String encoding) + { + switch (encoding) + { + case "utf_16": + return UTF_16; + default: + return UTF_8; + } + } +} diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModel.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModel.java new file mode 100644 index 0000000000..ab456f1756 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModel.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
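StringEncoding.UTF_8 above reads the per-character byte count directly off the lead byte: the run of high-order one bits equals the sequence length, recovered by complementing the byte, left-aligning it in an int, and counting leading zeros. A sketch of just that expression with spot checks (the class name is illustrative):

public class Utf8LengthSketch
{
    // length of a UTF-8 sequence from its lead byte:
    // 0xxxxxxx -> 1, 110xxxxx -> 2, 1110xxxx -> 3, 11110xxx -> 4
    static int charByteCount(int charByte0)
    {
        return (charByte0 & 0b1000_0000) != 0
            ? Integer.numberOfLeadingZeros((~charByte0 & 0xff) << 24)
            : 1;
    }

    public static void main(String[] args)
    {
        assert charByteCount(0x41) == 1;  // 'A'
        assert charByteCount(0xC3) == 2;  // lead byte of 'é'
        assert charByteCount(0xE2) == 3;  // lead byte of '€'
        assert charByteCount(0xF0) == 4;  // lead byte of a 4-byte sequence
    }
}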
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import java.net.URL; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; + +public class StringModel implements Model +{ + public static final String NAME = "string"; + + @Override + public String name() + { + return NAME; + } + + @Override + public ModelContext supply( + EngineContext context) + { + return new StringModelContext(context); + } + + @Override + public URL type() + { + return getClass().getResource("schema/string.schema.patch.json"); + } +} diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelContext.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelContext.java new file mode 100644 index 0000000000..112d15d1e4 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelContext.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringModelContext implements ModelContext +{ + public StringModelContext( + EngineContext context) + { + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return supply(config); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return supply(config); + } + + @Override + public ValidatorHandler supplyValidatorHandler( + ModelConfig config) + { + return new StringValidatorHandler(StringModelConfig.class.cast(config)); + } + + private StringConverterHandler supply( + ModelConfig config) + { + return new StringConverterHandler(StringModelConfig.class.cast(config)); + } +} diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactory.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactorySpi.java similarity index 54% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactory.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactorySpi.java index be226c7e35..5a0d547d33 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactory.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactorySpi.java @@ -12,21 
+12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core; +package io.aklivity.zilla.runtime.model.core.internal; import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; import io.aklivity.zilla.runtime.common.feature.Incubating; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; -import io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfig; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; @Incubating -public final class StringValidatorFactory implements ValidatorFactorySpi +public final class StringModelFactorySpi implements ModelFactorySpi { @Override public String type() @@ -41,11 +37,11 @@ public URL schema() } @Override - public Validator create( - ValidatorConfig config, - ToLongFunction<String> resolveId, - LongFunction<CatalogHandler> supplyCatalog) + public Model create( + Configuration config) { - return new StringValidator(StringValidatorConfig.class.cast(config)); + return new StringModel(); } + + }
diff --git a/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorEncoding.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorEncoding.java new file mode 100644 index 0000000000..801b67d841 --- /dev/null +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorEncoding.java @@ -0,0 +1,94 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_FIN; +import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_INIT; + +import org.agrona.DirectBuffer; + +public enum StringValidatorEncoding +{ + UTF_8 + { + private int pendingCharBytes; + + @Override + public boolean validate( + int flags, + DirectBuffer data, + int index, + int length) + { + if ((flags & FLAGS_INIT) != 0x00) + { + pendingCharBytes = 0; + } + + final int limit = index + length; + + while (index < limit) + { + final int charByte0 = data.getByte(index); + + if (pendingCharBytes > 0) + { + if ((charByte0 & 0b11000000) != 0b10000000) + { + break; + } + pendingCharBytes--; + index++; + } + else + { + final int charByteCount = (charByte0 & 0b1000_0000) != 0 + ? Integer.numberOfLeadingZeros((~charByte0 & 0xff) << 24) + : 1; + final int charByteLimit = index + charByteCount; + for (int charByteIndex = index + 1; charByteIndex < charByteLimit; charByteIndex++) + { + if (charByteIndex >= limit || (data.getByte(charByteIndex) & 0b11000000) != 0b10000000) + { + pendingCharBytes = charByteLimit - charByteIndex; + break; + } + } + index += pendingCharBytes == 0 ? charByteCount : pendingCharBytes; + } + } + + return (flags & FLAGS_FIN) == 0x00 + ? index == limit + : pendingCharBytes == 0 && index == limit; + } + }; + + public abstract boolean validate( + int flags, + DirectBuffer data, + int index, + int length); + + public static StringValidatorEncoding of( + String encoding) + { + switch (encoding) + { + default: + return UTF_8; + } + } +}
diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidator.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorHandler.java similarity index 50% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidator.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorHandler.java index 0d0fff271f..642a7aed50 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/IntegerValidator.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorHandler.java @@ -12,42 +12,32 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core; +package io.aklivity.zilla.runtime.model.core.internal; import org.agrona.DirectBuffer; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfig; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; -public class IntegerValidator implements Validator +public class StringValidatorHandler implements ValidatorHandler { - public IntegerValidator(IntegerValidatorConfig config) - { - } + private final StringValidatorEncoding encoding; - @Override - public boolean read( - DirectBuffer data, - int index, - int length) + public StringValidatorHandler( + StringModelConfig config) { - return validate(data, index, length); + this.encoding = StringValidatorEncoding.of(config.encoding); } @Override - public boolean write( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } - - private boolean validate( + public boolean validate( + int flags, DirectBuffer data, int index, - int length) + int length, + ValueConsumer next) { - return length == 4; + return encoding.validate(flags, data, index, length); } }
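To see the carry-over concretely: a two-byte character split across fragments is accepted incrementally, because pendingCharBytes survives between validate calls until FLAGS_FIN arrives. A minimal sketch (hypothetical driver class, assuming access to the internal package above; the byte values mirror the shouldVerifyWithPendingCharBytes test later in this change):

import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_FIN;
import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_INIT;

import org.agrona.concurrent.UnsafeBuffer;

public class Utf8FragmentSketch
{
    public static void main(String[] args)
    {
        UnsafeBuffer data = new UnsafeBuffer();
        byte[] bytes = {(byte) 0xc3, (byte) 0xa4}; // "ä" encoded as two UTF-8 bytes

        data.wrap(bytes, 0, 1); // first fragment carries only the lead byte
        // true: one continuation byte is still pending, but nothing seen is invalid
        boolean init = StringValidatorEncoding.UTF_8.validate(FLAGS_INIT, data, 0, 1);

        data.wrap(bytes, 1, 1); // final fragment delivers the continuation byte
        // true: pendingCharBytes drains to zero exactly as FLAGS_FIN arrives
        boolean fin = StringValidatorEncoding.UTF_8.validate(FLAGS_FIN, data, 0, 1);

        System.out.println(init && fin); // expected: true
    }
}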
diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapter.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapter.java similarity index 66% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapter.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapter.java index 6a7927ff67..dc7638660f 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapter.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapter.java @@ -12,16 +12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.internal.config; import jakarta.json.Json; import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; -public class IntegerValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter<ValidatorConfig, JsonValue> +public class IntegerModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter<ModelConfig, JsonValue> { @Override public String type() @@ -31,15 +32,15 @@ public String type() @Override public JsonValue adaptToJson( - ValidatorConfig options) + ModelConfig options) { return Json.createValue(type()); } @Override - public ValidatorConfig adaptFromJson( + public ModelConfig adaptFromJson( JsonValue object) { - return new IntegerValidatorConfig(); + return new IntegerModelConfig(); } }
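Worth noting about the adapter just above: writing always emits the compact discriminator, while reading maps the object form back to a default config. A small Jsonb sketch under that reading (the setup mirrors the adapter tests later in this change):

import jakarta.json.bind.Jsonb;
import jakarta.json.bind.JsonbBuilder;
import jakarta.json.bind.JsonbConfig;

Jsonb jsonb = JsonbBuilder.create(new JsonbConfig()
    .withAdapters(new IntegerModelConfigAdapter()));

// write: adaptToJson unconditionally returns Json.createValue("integer")
String json = jsonb.toJson(IntegerModelConfig.builder().build()); // "integer"

// read: the object form maps back to a default IntegerModelConfig
IntegerModelConfig config = jsonb.fromJson("{\"model\": \"integer\"}", IntegerModelConfig.class);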
diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapter.java b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapter.java similarity index 63% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapter.java rename to incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapter.java index 5536b28f2e..5265efecd9 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapter.java +++ b/incubator/model-core/src/main/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapter.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.internal.config; import jakarta.json.Json; import jakarta.json.JsonObject; @@ -21,26 +21,27 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; -public final class StringValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter<ValidatorConfig, JsonValue> +public final class StringModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter<ModelConfig, JsonValue> { - private static final String TYPE_NAME = "type"; + private static final String MODEL_NAME = "model"; private static final String ENCODING_NAME = "encoding"; @Override public JsonValue adaptToJson( - ValidatorConfig config) + ModelConfig config) { JsonValue result; - String encoding = ((StringValidatorConfig) config).encoding; - if (encoding != null && !encoding.isEmpty() && !encoding.equals(StringValidatorConfig.DEFAULT_ENCODING)) + String encoding = ((StringModelConfig) config).encoding; + if (encoding != null && !encoding.isEmpty() && !encoding.equals(StringModelConfig.DEFAULT_ENCODING)) { - JsonObjectBuilder validator = Json.createObjectBuilder(); - validator.add(TYPE_NAME, type()); - validator.add(ENCODING_NAME, encoding); - result = validator.build(); + JsonObjectBuilder converter = Json.createObjectBuilder(); + converter.add(MODEL_NAME, type()); + converter.add(ENCODING_NAME, encoding); + result = converter.build(); } else { @@ -50,13 +51,13 @@ public JsonValue adaptToJson( } @Override - public StringValidatorConfig adaptFromJson( + public StringModelConfig adaptFromJson( JsonValue value) { - StringValidatorConfig result = null; + StringModelConfig result = null; if (value instanceof JsonString) { - result = StringValidatorConfig.builder().build(); + result = StringModelConfig.builder().build(); } else if (value instanceof JsonObject) { @@ -64,7 +65,7 @@ else if (value instanceof JsonObject) String encoding = object.containsKey(ENCODING_NAME) ? object.getString(ENCODING_NAME) : null; - result = new StringValidatorConfig(encoding); + result = new StringModelConfig(encoding); } else {
diff --git a/incubator/model-core/src/main/moditect/module-info.java b/incubator/model-core/src/main/moditect/module-info.java new file mode 100644 index 0000000000..e85c2b65f9 --- /dev/null +++ b/incubator/model-core/src/main/moditect/module-info.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License.
+ */ +module io.aklivity.zilla.runtime.model.core +{ + requires io.aklivity.zilla.runtime.engine; + + exports io.aklivity.zilla.runtime.model.core.config; + + provides io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi + with io.aklivity.zilla.runtime.model.core.internal.config.IntegerModelConfigAdapter, + io.aklivity.zilla.runtime.model.core.internal.config.StringModelConfigAdapter; + + provides io.aklivity.zilla.runtime.engine.model.ModelFactorySpi + with io.aklivity.zilla.runtime.model.core.internal.IntegerModelFactorySpi, + io.aklivity.zilla.runtime.model.core.internal.StringModelFactorySpi; +} diff --git a/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi b/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi new file mode 100644 index 0000000000..b81e71d6a3 --- /dev/null +++ b/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi @@ -0,0 +1,2 @@ +io.aklivity.zilla.runtime.model.core.internal.config.IntegerModelConfigAdapter +io.aklivity.zilla.runtime.model.core.internal.config.StringModelConfigAdapter diff --git a/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi b/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi new file mode 100644 index 0000000000..1d8a29a482 --- /dev/null +++ b/incubator/model-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi @@ -0,0 +1,2 @@ +io.aklivity.zilla.runtime.model.core.internal.IntegerModelFactorySpi +io.aklivity.zilla.runtime.model.core.internal.StringModelFactorySpi diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterTest.java similarity index 63% rename from incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorTest.java rename to incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterTest.java index 3f76925cfd..072a0b1cd5 100644 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorTest.java +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerConverterTest.java @@ -12,21 +12,21 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.validator.core; +package io.aklivity.zilla.runtime.model.core.internal; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import org.agrona.DirectBuffer; import org.agrona.concurrent.UnsafeBuffer; import org.junit.Test; -import io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfig; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; -public class IntegerValidatorTest +public class IntegerConverterTest { - private final IntegerValidatorConfig config = new IntegerValidatorConfig(); - private final IntegerValidator validator = new IntegerValidator(config); + private final IntegerModelConfig config = new IntegerModelConfig(); + private final IntegerConverterHandler converter = new IntegerConverterHandler(config); @Test public void shouldVerifyValidInteger() @@ -35,7 +35,7 @@ public void shouldVerifyValidInteger() byte[] bytes = {0, 0, 0, 42}; data.wrap(bytes, 0, bytes.length); - assertTrue(validator.read(data, 0, data.capacity())); + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } @Test @@ -45,6 +45,6 @@ public void shouldVerifyInvalidInteger() byte[] bytes = "Not an Integer".getBytes(); data.wrap(bytes, 0, bytes.length); - assertFalse(validator.write(data, 0, data.capacity())); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } } diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactoryTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactoryTest.java new file mode 100644 index 0000000000..385add14d1 --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerModelFactoryTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; + +public class IntegerModelFactoryTest +{ + @Test + public void shouldCreateReader() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("integer", config); + + ModelContext context = new IntegerModelContext(mock(EngineContext.class)); + + ModelConfig modelConfig = IntegerModelConfig.builder().build(); + + assertThat(model, instanceOf(IntegerModel.class)); + assertThat(context.supplyReadConverterHandler(modelConfig), instanceOf(IntegerConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(modelConfig), instanceOf(IntegerConverterHandler.class)); + assertThat(context.supplyValidatorHandler(modelConfig), instanceOf(IntegerValidatorHandler.class)); + } +} diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorTest.java new file mode 100644 index 0000000000..f3244ebb8f --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/IntegerValidatorTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; + +public class IntegerValidatorTest +{ + private final IntegerModelConfig config = IntegerModelConfig.builder().build(); + private final IntegerValidatorHandler handler = new IntegerValidatorHandler(config); + + @Test + public void shouldVerifyValidIntegerCompleteMessage() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0, 0, 0, 42}; + data.wrap(bytes, 0, bytes.length); + assertTrue(handler.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyValidIntegerFragmentedMessage() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0, 0, 0, 42}; + + data.wrap(bytes, 0, 2); + assertTrue(handler.validate(ValidatorHandler.FLAGS_INIT, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 2, 1); + assertTrue(handler.validate(0x00, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 3, 1); + assertTrue(handler.validate(ValidatorHandler.FLAGS_FIN, data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidIntegerCompleteMessage() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Not an Integer".getBytes(); + data.wrap(bytes, 0, bytes.length); + assertFalse(handler.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidIntegerFragmentedMessage() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] firstFragment = {0, 0, 0}; + data.wrap(firstFragment, 0, firstFragment.length); + assertTrue(handler.validate(ValidatorHandler.FLAGS_INIT, data, 0, data.capacity(), ValueConsumer.NOP)); + + byte[] secondFragment = {0, 0}; + data.wrap(secondFragment, 0, secondFragment.length); + assertFalse(handler.validate(0x00, data, 0, data.capacity(), ValueConsumer.NOP)); + + byte[] finalFragment = {42}; + data.wrap(finalFragment, 0, finalFragment.length); + assertFalse(handler.validate(ValidatorHandler.FLAGS_FIN, data, 0, data.capacity(), ValueConsumer.NOP)); + } +}
diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterTest.java new file mode 100644 index 0000000000..c9f93d1aea --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringConverterTest.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License.
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static org.junit.Assert.assertEquals; + +import java.nio.charset.StandardCharsets; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringConverterTest +{ + @Test + public void shouldVerifyValidUtf8() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(); + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidUtf8() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xc0}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyValidUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(StandardCharsets.UTF_16); + data.wrap(bytes, 0, bytes.length); + + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyIncompleteUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x48}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyIncompleteSurrogatePairUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xD8, (byte) 0x00}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidSecondSurrogateUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xDC, (byte) 0x01}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyUnexpectedSecondSurrogateUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xDC, (byte) 0x80}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + 
public void shouldVerifyInvalidMixedUtf16() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_16") + .build(); + StringConverterHandler converter = new StringConverterHandler(config); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0, 72, 0, 101, 0, 108, 0, 108, 0, 111, 65, 66, 67}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } +}
diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringEncodingTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringEncodingTest.java new file mode 100644 index 0000000000..5ff69589ab --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringEncodingTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_COMPLETE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.nio.charset.StandardCharsets; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +public class StringEncodingTest +{ + @Test + public void shouldVerifyValidUTF8() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(); + data.wrap(bytes, 0, bytes.length); + + assertTrue(StringEncoding.UTF_8.validate(data, 0, bytes.length)); + + assertTrue(StringValidatorEncoding.UTF_8.validate(FLAGS_COMPLETE, data, 0, bytes.length)); + } + + @Test + public void shouldVerifyValidUTF16() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(StandardCharsets.UTF_16); + data.wrap(bytes, 0, bytes.length); + + assertTrue(StringEncoding.UTF_16.validate(data, 0, bytes.length)); + } + + @Test + public void shouldVerifyInvalidUTF16() + { + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xD8, (byte) 0x00}; + data.wrap(bytes, 0, bytes.length); + + assertFalse(StringEncoding.UTF_16.validate(data, 0, bytes.length)); + } + + @Test + public void shouldVerifyStringEncodingOf() + { + assertEquals(StringEncoding.UTF_8, StringEncoding.of("utf_8")); + assertEquals(StringEncoding.UTF_16, StringEncoding.of("utf_16")); + + assertEquals(StringValidatorEncoding.UTF_8, StringValidatorEncoding.of("utf_8")); + } +}
diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactoryTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactoryTest.java new file mode 100644 index 0000000000..2913220059 --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringModelFactoryTest.java @@ -0,0 +1,49 @@ +/* + * Copyright
2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringModelFactoryTest +{ + @Test + public void shouldCreateReader() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("string", config); + + ModelContext context = new StringModelContext(mock(EngineContext.class)); + + ModelConfig modelConfig = StringModelConfig.builder().encoding("utf_8").build(); + + assertThat(model, instanceOf(StringModel.class)); + assertThat(context.supplyReadConverterHandler(modelConfig), instanceOf(StringConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(modelConfig), instanceOf(StringConverterHandler.class)); + assertThat(context.supplyValidatorHandler(modelConfig), instanceOf(StringValidatorHandler.class)); + } +} diff --git a/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorTest.java new file mode 100644 index 0000000000..34db2cc059 --- /dev/null +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/StringValidatorTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.core.internal; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringValidatorTest +{ + @Test + public void shouldVerifyValidUtf8() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringValidatorHandler handler = new StringValidatorHandler(config); + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(); + data.wrap(bytes, 0, bytes.length); + assertTrue(handler.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyFragmentedValidUtf8() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringValidatorHandler handler = new StringValidatorHandler(config); + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = "Valid String".getBytes(); + + data.wrap(bytes, 0, 6); + assertTrue(handler.validate(ValidatorHandler.FLAGS_INIT, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 6, 5); + assertTrue(handler.validate(0x00, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 11, 1); + assertTrue(handler.validate(ValidatorHandler.FLAGS_FIN, data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyFragmentedInvalidUtf8() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringValidatorHandler handler = new StringValidatorHandler(config); + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = { + (byte) 'S', (byte) 't', (byte) 'r', (byte) 'i', (byte) 'n', (byte) 'g', + (byte) 0xc0, (byte) 'V', (byte) 'a', (byte) 'l', (byte) 'i', + (byte) 'd' + }; + + data.wrap(bytes, 0, 6); + assertTrue(handler.validate(ValidatorHandler.FLAGS_INIT, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 6, 5); + assertFalse(handler.validate(0x00, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 11, 1); + assertFalse(handler.validate(ValidatorHandler.FLAGS_FIN, data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyWithPendingCharBytes() + { + StringModelConfig config = StringModelConfig.builder() + .encoding("utf_8") + .build(); + StringValidatorHandler handler = new StringValidatorHandler(config); + UnsafeBuffer data = new UnsafeBuffer(); + + byte[] bytes = {(byte) 0xc3, (byte) 0xa4}; + + data.wrap(bytes, 0, 1); + assertTrue(handler.validate(ValidatorHandler.FLAGS_INIT, data, 0, data.capacity(), ValueConsumer.NOP)); + + data.wrap(bytes, 1, 1); + assertTrue(handler.validate(ValidatorHandler.FLAGS_FIN, data, 0, data.capacity(), ValueConsumer.NOP)); + + } +}
diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapterTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapterTest.java similarity index 67% rename from incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapterTest.java rename to
incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapterTest.java index da6befc886..384e75c76b 100644 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigAdapterTest.java +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/IntegerModelConfigAdapterTest.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.internal.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +26,9 @@ import org.junit.Before; import org.junit.Test; -public class IntegerValidatorConfigAdapterTest +import io.aklivity.zilla.runtime.model.core.config.IntegerModelConfig; + +public class IntegerModelConfigAdapterTest { private Jsonb jsonb; @@ -34,36 +36,36 @@ public class IntegerValidatorConfigAdapterTest public void initJson() { JsonbConfig config = new JsonbConfig() - .withAdapters(new IntegerValidatorConfigAdapter()); + .withAdapters(new IntegerModelConfigAdapter()); jsonb = JsonbBuilder.create(config); } @Test - public void shouldReadIntegerValidator() + public void shouldReadIntegerConverter() { // GIVEN String json = "{" + - "\"type\": \"integer\"" + + "\"model\": \"integer\"" + "}"; // WHEN - IntegerValidatorConfig validator = jsonb.fromJson(json, IntegerValidatorConfig.class); + IntegerModelConfig converter = jsonb.fromJson(json, IntegerModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("integer")); + assertThat(converter, not(nullValue())); + assertThat(converter.model, equalTo("integer")); } @Test - public void shouldWriteIntegerValidator() + public void shouldWriteIntegerConverter() { // GIVEN String expectedJson = "\"integer\""; - IntegerValidatorConfig validator = IntegerValidatorConfig.builder().build(); + IntegerModelConfig converter = IntegerModelConfig.builder().build(); // WHEN - String json = jsonb.toJson(validator); + String json = jsonb.toJson(converter); // THEN assertThat(json, not(nullValue()));
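One shape the tests below do not exercise: the string model should also read back from its compact form, via the JsonString branch of adaptFromJson shown earlier. A sketch under that assumption, reusing the same Jsonb setup as the tests:

Jsonb jsonb = JsonbBuilder.create(new JsonbConfig()
    .withAdapters(new StringModelConfigAdapter()));

// a bare "string" should yield a default config (DEFAULT_ENCODING, presumably utf_8)
StringModelConfig config = jsonb.fromJson("\"string\"", StringModelConfig.class);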
diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapterTest.java b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapterTest.java similarity index 66% rename from incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapterTest.java rename to incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapterTest.java index 89ab178016..a05af5433c 100644 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/StringValidatorConfigAdapterTest.java +++ b/incubator/model-core/src/test/java/io/aklivity/zilla/runtime/model/core/internal/config/StringModelConfigAdapterTest.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.core.internal.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +26,9 @@ import org.junit.Before; import org.junit.Test; -public class StringValidatorConfigAdapterTest +import io.aklivity.zilla.runtime.model.core.config.StringModelConfig; + +public class StringModelConfigAdapterTest { private Jsonb jsonb; @@ -34,38 +36,38 @@ public class StringValidatorConfigAdapterTest public void initJson() { JsonbConfig config = new JsonbConfig() - .withAdapters(new StringValidatorConfigAdapter()); + .withAdapters(new StringModelConfigAdapter()); jsonb = JsonbBuilder.create(config); } @Test - public void shouldReadStringValidator() + public void shouldReadStringConverter() { // GIVEN String json = "{" + - "\"type\": \"string\"," + + "\"model\": \"string\"," + "\"encoding\": \"utf_8\"" + "}"; // WHEN - StringValidatorConfig validator = jsonb.fromJson(json, StringValidatorConfig.class); + StringModelConfig model = jsonb.fromJson(json, StringModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("string")); - assertThat(validator.encoding, equalTo("utf_8")); + assertThat(model, not(nullValue())); + assertThat(model.model, equalTo("string")); + assertThat(model.encoding, equalTo("utf_8")); } @Test - public void shouldWriteDefaultEncodingStringValidator() + public void shouldWriteDefaultEncodingStringConverter() { // GIVEN String expectedJson = "\"string\""; - StringValidatorConfig validator = StringValidatorConfig.builder().build(); + StringModelConfig converter = StringModelConfig.builder().build(); // WHEN - String json = jsonb.toJson(validator); + String json = jsonb.toJson(converter); // THEN assertThat(json, not(nullValue())); @@ -73,20 +75,20 @@ public void shouldWriteDefaultEncodingStringValidator() } @Test - public void shouldWriteStringValidator() + public void shouldWriteStringConverter() { // GIVEN String expectedJson = "{" + - "\"type\":\"string\"," + + "\"model\":\"string\"," + "\"encoding\":\"utf_16\"" + "}"; - StringValidatorConfig validator = StringValidatorConfig.builder() + StringModelConfig model = StringModelConfig.builder() .encoding("utf_16") .build(); // WHEN - String json = jsonb.toJson(validator); + String json = jsonb.toJson(model); // THEN assertThat(json, not(nullValue()));
diff --git a/incubator/validator-json.spec/COPYRIGHT b/incubator/model-json.spec/COPYRIGHT similarity index 100% rename from incubator/validator-json.spec/COPYRIGHT rename to incubator/model-json.spec/COPYRIGHT
diff --git a/incubator/validator-json.spec/LICENSE b/incubator/model-json.spec/LICENSE similarity index 100% rename from incubator/validator-json.spec/LICENSE rename to incubator/model-json.spec/LICENSE
diff --git a/incubator/validator-json.spec/NOTICE b/incubator/model-json.spec/NOTICE similarity index 100% rename from incubator/validator-json.spec/NOTICE rename to incubator/model-json.spec/NOTICE
diff --git a/incubator/validator-json.spec/NOTICE.template b/incubator/model-json.spec/NOTICE.template similarity index 100% rename from incubator/validator-json.spec/NOTICE.template rename to incubator/model-json.spec/NOTICE.template
diff --git a/incubator/validator-json.spec/mvnw b/incubator/model-json.spec/mvnw similarity index 100% rename from incubator/validator-json.spec/mvnw rename to incubator/model-json.spec/mvnw
diff --git a/incubator/validator-json.spec/mvnw.cmd
b/incubator/model-json.spec/mvnw.cmd similarity index 100% rename from incubator/validator-json.spec/mvnw.cmd rename to incubator/model-json.spec/mvnw.cmd diff --git a/incubator/validator-json.spec/pom.xml b/incubator/model-json.spec/pom.xml similarity index 96% rename from incubator/validator-json.spec/pom.xml rename to incubator/model-json.spec/pom.xml index f66dd0bef5..ca5acf9951 100644 --- a/incubator/validator-json.spec/pom.xml +++ b/incubator/model-json.spec/pom.xml @@ -8,12 +8,12 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml -validator-json.spec -zilla::incubator::validator-json.spec +model-json.spec +zilla::incubator::model-json.spec diff --git a/incubator/validator-json.spec/src/main/moditect/module-info.java b/incubator/model-json.spec/src/main/moditect/module-info.java similarity index 92% rename from incubator/validator-json.spec/src/main/moditect/module-info.java rename to incubator/model-json.spec/src/main/moditect/module-info.java index 08a27de593..a93c2004e4 100644 --- a/incubator/validator-json.spec/src/main/moditect/module-info.java +++ b/incubator/model-json.spec/src/main/moditect/module-info.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -open module io.aklivity.zilla.specs.validator.json +open module io.aklivity.zilla.specs.model.json { requires transitive io.aklivity.zilla.specs.engine; } diff --git a/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/config/model.yaml b/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/config/model.yaml new file mode 100644 index 0000000000..bffde5db5b --- /dev/null +++ b/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/config/model.yaml @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +--- +name: test +catalogs: + test0: + type: test + options: + schema: | + { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "status": { + "type": "string" + } + }, + "required": [ + "id", + "status" + ] + } +bindings: + test: + kind: server + type: test + options: + value: + model: json + catalog: + catalog0: + - subject: test0 + version: latest + exit: test diff --git a/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/schema/json.schema.patch.json b/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/schema/json.schema.patch.json new file mode 100644 index 0000000000..b9469bc6dc --- /dev/null +++ b/incubator/model-json.spec/src/main/scripts/io/aklivity/zilla/specs/model/json/schema/json.schema.patch.json @@ -0,0 +1,254 @@ +[ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "json" + }, + { + "op": "add", + "path": "/$defs/converter/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "json" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "json" + }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + } + } + }, + { + "op": "add", + "path": "/$defs/validator/types/enum/-", + "value": "json" + }, + { + "op": "add", + "path": "/$defs/validator/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "json" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "json" + }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + 
"additionalProperties": false + } + } + } +] diff --git a/incubator/model-json.spec/src/test/java/io/aklivity/zilla/specs/model/json/config/SchemaTest.java b/incubator/model-json.spec/src/test/java/io/aklivity/zilla/specs/model/json/config/SchemaTest.java new file mode 100644 index 0000000000..2d0ad426ba --- /dev/null +++ b/incubator/model-json.spec/src/test/java/io/aklivity/zilla/specs/model/json/config/SchemaTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.specs.model.json.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.JsonObject; + +import org.junit.Rule; +import org.junit.Test; + +import io.aklivity.zilla.specs.engine.config.ConfigSchemaRule; + +public class SchemaTest +{ + @Rule + public final ConfigSchemaRule schema = new ConfigSchemaRule() + .schemaPatch("io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/model/json/schema/json.schema.patch.json") + .configurationRoot("io/aklivity/zilla/specs/model/json/config"); + + @Test + public void shouldValidateCatalog() + { + JsonObject config = schema.validate("model.yaml"); + + assertThat(config, not(nullValue())); + } +} diff --git a/incubator/validator-json/COPYRIGHT b/incubator/model-json/COPYRIGHT similarity index 100% rename from incubator/validator-json/COPYRIGHT rename to incubator/model-json/COPYRIGHT diff --git a/incubator/validator-json/LICENSE b/incubator/model-json/LICENSE similarity index 100% rename from incubator/validator-json/LICENSE rename to incubator/model-json/LICENSE diff --git a/incubator/validator-json/NOTICE b/incubator/model-json/NOTICE similarity index 100% rename from incubator/validator-json/NOTICE rename to incubator/model-json/NOTICE diff --git a/incubator/validator-json/NOTICE.template b/incubator/model-json/NOTICE.template similarity index 100% rename from incubator/validator-json/NOTICE.template rename to incubator/model-json/NOTICE.template diff --git a/incubator/validator-json/mvnw b/incubator/model-json/mvnw similarity index 100% rename from incubator/validator-json/mvnw rename to incubator/model-json/mvnw diff --git a/incubator/validator-json/mvnw.cmd b/incubator/model-json/mvnw.cmd similarity index 100% rename from incubator/validator-json/mvnw.cmd rename to incubator/model-json/mvnw.cmd diff --git a/incubator/validator-json/pom.xml b/incubator/model-json/pom.xml similarity index 92% rename from incubator/validator-json/pom.xml rename to incubator/model-json/pom.xml index 903912d79c..089e968930 100644 --- a/incubator/validator-json/pom.xml +++ b/incubator/model-json/pom.xml @@ -6,12 +6,12 @@ io.aklivity.zilla incubator - 0.9.66 + 0.9.67 ../pom.xml -validator-json -zilla::incubator::validator-json +model-json 
+model-json +zilla::incubator::model-json @@ -24,14 +24,14 @@ 11 11 - 0.83 + 0.90 0 ${project.groupId} - validator-json.spec + model-json.spec ${project.version} provided @@ -98,16 +98,16 @@ ${project.groupId} - validator-json.spec + model-json.spec - ^\Qio/aklivity/zilla/specs/validator/json/\E - io/aklivity/zilla/runtime/validator/json/ + ^\Qio/aklivity/zilla/specs/model/json/\E + io/aklivity/zilla/runtime/model/json/internal/ - io/aklivity/zilla/specs/validator/json/schema/json.schema.patch.json + io/aklivity/zilla/specs/model/json/schema/json.schema.patch.json ${project.build.directory}/classes
diff --git a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfig.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/config/JsonModelConfig.java similarity index 52% rename from incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfig.java rename to incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/config/JsonModelConfig.java index 97821ad23b..b2dbbcfb23 100644 --- a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfig.java +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/config/JsonModelConfig.java @@ -12,33 +12,34 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.json.config; +package io.aklivity.zilla.runtime.model.json.config; import java.util.List; import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; -public final class JsonValidatorConfig extends ValidatorConfig +public final class JsonModelConfig extends ModelConfig { - public final List<CatalogedConfig> catalogs; + public final String subject; - JsonValidatorConfig( - List<CatalogedConfig> catalogs) + public JsonModelConfig( + List<CatalogedConfig> cataloged, + String subject) { - super("json"); - this.catalogs = catalogs; + super("json", cataloged); + this.subject = subject; } - public static <T> JsonValidatorConfigBuilder<T> builder( - Function<ValidatorConfig, T> mapper) + public static <T> JsonModelConfigBuilder<T> builder( + Function<ModelConfig, T> mapper) { - return new JsonValidatorConfigBuilder<>(mapper::apply); + return new JsonModelConfigBuilder<>(mapper::apply); } - public static JsonValidatorConfigBuilder<JsonValidatorConfig> builder() + public static JsonModelConfigBuilder<JsonModelConfig> builder() { - return new JsonValidatorConfigBuilder<>(JsonValidatorConfig.class::cast); + return new JsonModelConfigBuilder<>(JsonModelConfig.class::cast); } }
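For configuration code built on top of this change, the visible surface here reduces to a fluent builder; a one-line sketch (the subject value is illustrative, borrowed from the model.yaml spec earlier, and catalog wiring goes through the CatalogedConfig builder shown in the next file):

JsonModelConfig config = JsonModelConfig.builder()
    .subject("test0") // schema registry subject used to look up the JSON schema
    .build();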
-12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.avro.config; +package io.aklivity.zilla.runtime.model.json.config; import java.util.LinkedList; import java.util.List; @@ -22,39 +22,39 @@ import io.aklivity.zilla.runtime.engine.config.CatalogedConfigBuilder; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public class AvroValidatorConfigBuilder<T> extends ConfigBuilder<T, AvroValidatorConfigBuilder<T>> +public class JsonModelConfigBuilder<T> extends ConfigBuilder<T, JsonModelConfigBuilder<T>> { - private final Function<ValidatorConfig, T> mapper; + private final Function<ModelConfig, T> mapper; private List<CatalogedConfig> catalogs; private String subject; - AvroValidatorConfigBuilder( - Function<ValidatorConfig, T> mapper) + JsonModelConfigBuilder( + Function<ModelConfig, T> mapper) { this.mapper = mapper; } @Override @SuppressWarnings("unchecked") - protected Class<AvroValidatorConfigBuilder<T>> thisType() + protected Class<JsonModelConfigBuilder<T>> thisType() { - return (Class<AvroValidatorConfigBuilder<T>>) getClass(); + return (Class<JsonModelConfigBuilder<T>>) getClass(); } - public AvroValidatorConfigBuilder<T> subject( - String subject) + public CatalogedConfigBuilder<JsonModelConfigBuilder<T>> catalog() { - this.subject = subject; - return this; + return CatalogedConfig.builder(this::catalog); } - public CatalogedConfigBuilder<AvroValidatorConfigBuilder<T>> catalog() + public JsonModelConfigBuilder<T> subject( + String subject) { - return CatalogedConfig.builder(this::catalog); + this.subject = subject; + return this; } - public AvroValidatorConfigBuilder<T> catalog( + public JsonModelConfigBuilder<T> catalog( CatalogedConfig catalog) { if (catalogs == null) @@ -68,6 +68,6 @@ public AvroValidatorConfigBuilder<T> catalog( @Override public T build() { - return mapper.apply(new AvroValidatorConfig(catalogs, subject)); + return mapper.apply(new JsonModelConfig(catalogs, subject)); } } diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModel.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModel.java new file mode 100644 index 0000000000..4d0ac6d9a8 --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModel.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License.
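
The renamed builder keeps the fluent shape of its Avro predecessor while mapping to ModelConfig, and the nested catalog() call opens a CatalogedConfigBuilder whose build() folds back into the model config. A minimal usage sketch, limited to builder methods that appear elsewhere in this diff:

    JsonModelConfig config = JsonModelConfig.builder()
        .subject("test-value")           // optional default, used when no schema-level subject is set
        .catalog()
            .name("test0")
            .schema()
                .subject("subject1")
                .version("latest")
                .build()
            .build()
        .build();
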
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import java.net.URL; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; + +public class JsonModel implements Model +{ + public static final String NAME = "json"; + + @Override + public String name() + { + return NAME; + } + + @Override + public ModelContext supply( + EngineContext context) + { + return new JsonModelContext(context); + } + + @Override + public URL type() + { + return getClass().getResource("schema/json.schema.patch.json"); + } +} diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelContext.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelContext.java new file mode 100644 index 0000000000..ac77d3e9b7 --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelContext.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.json.internal; + +import java.util.function.LongFunction; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonModelContext implements ModelContext +{ + private final LongFunction supplyCatalog; + + public JsonModelContext(EngineContext context) + { + this.supplyCatalog = context::supplyCatalog; + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return new JsonReadConverterHandler(JsonModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return new JsonWriteConverterHandler(JsonModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ValidatorHandler supplyValidatorHandler( + ModelConfig config) + { + return new JsonValidatorHandler(JsonModelConfig.class.cast(config), supplyCatalog); + } +} diff --git a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactory.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpi.java similarity index 51% rename from incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactory.java rename to incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpi.java index 9d8372b195..86795f2064 100644 --- 
a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactory.java +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpi.java @@ -12,26 +12,22 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.json; +package io.aklivity.zilla.runtime.model.json.internal; import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; import io.aklivity.zilla.runtime.common.feature.Incubating; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; @Incubating -public final class JsonValidatorFactory implements ValidatorFactorySpi +public final class JsonModelFactorySpi implements ModelFactorySpi { @Override public String type() { - return "json"; + return JsonModel.NAME; } public URL schema() @@ -40,11 +36,9 @@ public URL schema() } @Override - public Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) + public Model create( + Configuration config) { - return new JsonValidator(JsonValidatorConfig.class.cast(config), resolveId, supplyCatalog); + return new JsonModel(); } } diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelHandler.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelHandler.java new file mode 100644 index 0000000000..e86b7dfb3c --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelHandler.java @@ -0,0 +1,128 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
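
With the move to ModelFactorySpi, create no longer receives the resolver and catalog functions; it takes only the engine Configuration and returns a stateless Model, so per-binding wiring happens later through JsonModelContext. Assuming the ServiceLoader-backed ModelFactory exercised by the SPI test near the end of this diff, discovery reduces to:

    Configuration config = new Configuration();
    ModelFactory factory = ModelFactory.instantiate();   // picks up JsonModelFactorySpi via META-INF/services
    Model model = factory.create("json", config);        // "json" matches JsonModel.NAME
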
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import java.io.StringReader; +import java.util.function.LongFunction; + +import jakarta.json.spi.JsonProvider; +import jakarta.json.stream.JsonParser; +import jakarta.json.stream.JsonParserFactory; + +import org.agrona.DirectBuffer; +import org.agrona.collections.Int2ObjectCache; +import org.agrona.io.DirectBufferInputStream; +import org.leadpony.justify.api.JsonSchema; +import org.leadpony.justify.api.JsonSchemaReader; +import org.leadpony.justify.api.JsonValidatingException; +import org.leadpony.justify.api.JsonValidationService; +import org.leadpony.justify.api.ProblemHandler; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public abstract class JsonModelHandler +{ + protected final SchemaConfig catalog; + protected final CatalogHandler handler; + protected final String subject; + + private final Int2ObjectCache<JsonSchema> schemas; + private final Int2ObjectCache<JsonProvider> providers; + private final JsonProvider schemaProvider; + private final JsonValidationService service; + private final JsonParserFactory factory; + private DirectBufferInputStream in; + + public JsonModelHandler( + JsonModelConfig config, + LongFunction<CatalogHandler> supplyCatalog) + { + this.schemaProvider = JsonProvider.provider(); + this.service = JsonValidationService.newInstance(); + this.factory = schemaProvider.createParserFactory(null); + CatalogedConfig cataloged = config.cataloged.get(0); + this.catalog = cataloged.schemas.size() != 0 ? cataloged.schemas.get(0) : null; + this.handler = supplyCatalog.apply(cataloged.id); + this.subject = catalog != null && catalog.subject != null + ?
catalog.subject + : config.subject; + this.schemas = new Int2ObjectCache<>(1, 1024, i -> {}); + this.providers = new Int2ObjectCache<>(1, 1024, i -> {}); + this.in = new DirectBufferInputStream(); + } + + protected final boolean validate( + int schemaId, + DirectBuffer buffer, + int index, + int length) + { + boolean status = false; + try + { + JsonProvider provider = supplyProvider(schemaId); + in.wrap(buffer, index, length); + provider.createReader(in).readValue(); + status = true; + } + catch (JsonValidatingException ex) + { + ex.printStackTrace(); + } + return status; + } + + protected JsonProvider supplyProvider( + int schemaId) + { + return providers.computeIfAbsent(schemaId, this::createProvider); + } + + private JsonSchema supplySchema( + int schemaId) + { + return schemas.computeIfAbsent(schemaId, this::resolveSchema); + } + + private JsonSchema resolveSchema( + int schemaId) + { + JsonSchema schema = null; + String schemaText = handler.resolve(schemaId); + if (schemaText != null) + { + JsonParser schemaParser = factory.createParser(new StringReader(schemaText)); + JsonSchemaReader reader = service.createSchemaReader(schemaParser); + schema = reader.read(); + } + + return schema; + } + + private JsonProvider createProvider( + int schemaId) + { + JsonSchema schema = supplySchema(schemaId); + JsonProvider provider = null; + if (schema != null) + { + provider = service.createJsonProvider(schema, parser -> ProblemHandler.throwing()); + } + return provider; + } +} diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonReadConverterHandler.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonReadConverterHandler.java new file mode 100644 index 0000000000..ad62353542 --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonReadConverterHandler.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
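
JsonModelHandler compiles each schema at most once: resolveSchema parses the catalog text through Justify's JsonSchemaReader, createProvider wraps the result in a validating JsonProvider, and both results are memoized in Int2ObjectCache instances keyed by schema id, so repeated messages against the same schema skip re-parsing. Stripped of the caching, the Justify flow is roughly the following sketch (readSchema is assumed to be the convenience equivalent of the createSchemaReader sequence above):

    JsonValidationService service = JsonValidationService.newInstance();
    JsonSchema schema = service.readSchema(new StringReader(schemaText));
    JsonProvider provider = service.createJsonProvider(schema, parser -> ProblemHandler.throwing());
    provider.createReader(in).readValue();   // throws JsonValidatingException when the payload is invalid
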
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import static io.aklivity.zilla.runtime.engine.catalog.CatalogHandler.NO_SCHEMA_ID; + +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonReadConverterHandler extends JsonModelHandler implements ConverterHandler +{ + public JsonReadConverterHandler( + JsonModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + return handler.decode(data, index, length, next, this::decodePayload); + } + + private int decodePayload( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + if (schemaId == NO_SCHEMA_ID) + { + if (catalog.id != NO_SCHEMA_ID) + { + schemaId = catalog.id; + } + else + { + schemaId = handler.resolve(subject, catalog.version); + } + } + + if (validate(schemaId, data, index, length)) + { + next.accept(data, index, length); + valLength = length; + } + return valLength; + } +} diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorHandler.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorHandler.java new file mode 100644 index 0000000000..994601abc4 --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorHandler.java @@ -0,0 +1,93 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
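
On the read path, framing is the catalog's concern: handler.decode strips any wire-level prefix and passes the schema id it found to decodePayload, which falls back to the configured catalog id, or to a subject/version lookup, when the message carries NO_SCHEMA_ID. The resulting calling convention, as the converter tests later in this diff exercise it:

    DirectBuffer data = new UnsafeBuffer("{\"id\": \"123\", \"status\": \"OK\"}".getBytes());
    int valLength = converter.convert(data, 0, data.capacity(), ValueConsumer.NOP);
    // valLength == data.capacity() when the payload validates; -1 when it does not
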
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import java.util.function.LongFunction; + +import jakarta.json.spi.JsonProvider; +import jakarta.json.stream.JsonParser; +import jakarta.json.stream.JsonParsingException; + +import org.agrona.DirectBuffer; +import org.agrona.ExpandableDirectByteBuffer; +import org.agrona.io.DirectBufferInputStream; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonValidatorHandler extends JsonModelHandler implements ValidatorHandler +{ + private final DirectBufferInputStream in; + private final ExpandableDirectByteBuffer buffer; + + private JsonParser parser; + private int progress; + + public JsonValidatorHandler( + JsonModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + this.buffer = new ExpandableDirectByteBuffer(); + this.in = new DirectBufferInputStream(buffer); + } + + @Override + public boolean validate( + int flags, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + boolean status = true; + + try + { + if ((flags & FLAGS_INIT) != 0x00) + { + this.progress = 0; + } + + buffer.putBytes(progress, data, index, length); + progress += length; + + if ((flags & FLAGS_FIN) != 0x00) + { + in.wrap(buffer, 0, progress); + + int schemaId = catalog != null && catalog.id > 0 + ? catalog.id + : handler.resolve(subject, catalog.version); + + JsonProvider provider = supplyProvider(schemaId); + parser = provider.createParser(in); + while (parser.hasNext()) + { + parser.next(); + } + } + } + catch (JsonParsingException ex) + { + status = false; + ex.printStackTrace(); + } + + return status; + } +} diff --git a/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonWriteConverterHandler.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonWriteConverterHandler.java new file mode 100644 index 0000000000..9286f1eba2 --- /dev/null +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/JsonWriteConverterHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
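
JsonValidatorHandler is the piece that lets fragmented messages validate incrementally: a FLAGS_INIT fragment resets the accumulator, every fragment is appended to the ExpandableDirectByteBuffer, and only a FLAGS_FIN fragment triggers the parse of the assembled payload, so intermediate fragments report success until the terminal fragment can be checked. Mirroring the fragmented test case later in this diff:

    assertTrue(validator.validate(FLAGS_INIT, data, 0, 12, ValueConsumer.NOP));                    // buffered only
    assertTrue(validator.validate(FLAGS_FIN, data, 12, data.capacity() - 12, ValueConsumer.NOP));  // parsed here
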
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonWriteConverterHandler extends JsonModelHandler implements ConverterHandler +{ + public JsonWriteConverterHandler( + JsonModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + return handler.encodePadding(); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + int schemaId = catalog != null && catalog.id > 0 + ? catalog.id + : handler.resolve(subject, catalog.version); + + if (validate(schemaId, data, index, length)) + { + valLength = handler.encode(schemaId, data, index, length, next, CatalogHandler.Encoder.IDENTITY); + } + return valLength; + } +} diff --git a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapter.java b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapter.java similarity index 51% rename from incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapter.java rename to incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapter.java index 24ffeabdec..fd38445239 100644 --- a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapter.java +++ b/incubator/model-json/src/main/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapter.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
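
The write path is the mirror image: resolve the schema id up front, validate the raw JSON, then let the catalog frame it, with CatalogHandler.Encoder.IDENTITY leaving the payload bytes untouched. padding() exposes handler.encodePadding(), presumably so callers can reserve space for that framing before writing:

    int padding = converter.padding(data, 0, data.capacity());          // catalog framing overhead
    int valLength = converter.convert(data, 0, data.capacity(), ValueConsumer.NOP);
    // valLength >= 0 only when validation passed and handler.encode(...) accepted the payload
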
*/ -package io.aklivity.zilla.runtime.validator.json.config; +package io.aklivity.zilla.runtime.model.json.internal.config; import java.util.LinkedList; import java.util.List; @@ -26,16 +26,18 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; import io.aklivity.zilla.runtime.engine.config.SchemaConfig; import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; -public final class JsonValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter<ValidatorConfig, JsonValue> +public final class JsonModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter<ModelConfig, JsonValue> { private static final String JSON = "json"; - private static final String TYPE_NAME = "type"; + private static final String MODEL_NAME = "model"; private static final String CATALOG_NAME = "catalog"; + private static final String SUBJECT_NAME = "subject"; private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); @@ -47,15 +49,15 @@ public String type() @Override public JsonValue adaptToJson( - ValidatorConfig config) + ModelConfig config) { - JsonValidatorConfig validatorConfig = (JsonValidatorConfig) config; - JsonObjectBuilder validator = Json.createObjectBuilder(); - validator.add(TYPE_NAME, JSON); - if (validatorConfig.catalogs != null && !validatorConfig.catalogs.isEmpty()) + JsonModelConfig jsonConfig = (JsonModelConfig) config; + JsonObjectBuilder converter = Json.createObjectBuilder(); + converter.add(MODEL_NAME, JSON); + if (jsonConfig.cataloged != null && !jsonConfig.cataloged.isEmpty()) { JsonObjectBuilder catalogs = Json.createObjectBuilder(); - for (CatalogedConfig catalog : validatorConfig.catalogs) + for (CatalogedConfig catalog : jsonConfig.cataloged) { JsonArrayBuilder array = Json.createArrayBuilder(); for (SchemaConfig schemaItem: catalog.schemas) @@ -64,36 +66,38 @@ public JsonValue adaptToJson( } catalogs.add(catalog.name, array); } - validator.add(CATALOG_NAME, catalogs); + converter.add(CATALOG_NAME, catalogs); } - return validator.build(); + return converter.build(); } @Override - public ValidatorConfig adaptFromJson( + public ModelConfig adaptFromJson( JsonValue value) { JsonObject object = (JsonObject) value; - ValidatorConfig result = null; - if (object.containsKey(CATALOG_NAME)) + + assert object.containsKey(CATALOG_NAME); + + JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME); + List<CatalogedConfig> catalogs = new LinkedList<>(); + for (String catalogName: catalogsJson.keySet()) { - JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME); - List<CatalogedConfig> catalogs = new LinkedList<>(); - for (String catalogName: catalogsJson.keySet()) + JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); + List<SchemaConfig> schemas = new LinkedList<>(); + for (JsonValue item : schemasJson) { - JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); - List<SchemaConfig> schemas = new LinkedList<>(); - for (JsonValue item : schemasJson) - { - JsonObject schemaJson = (JsonObject) item; - SchemaConfig schemaElement = schema.adaptFromJson(schemaJson); - schemas.add(schemaElement); - } - catalogs.add(new CatalogedConfig(catalogName, schemas)); + JsonObject schemaJson = (JsonObject) item; + SchemaConfig schemaElement =
schema.adaptFromJson(schemaJson); + schemas.add(schemaElement); } - - result = new JsonValidatorConfig(catalogs); + catalogs.add(new CatalogedConfig(catalogName, schemas)); } - return result; + + String subject = object.containsKey(SUBJECT_NAME) + ? object.getString(SUBJECT_NAME) + : null; + + return new JsonModelConfig(catalogs, subject); } } diff --git a/incubator/validator-json/src/main/moditect/module-info.java b/incubator/model-json/src/main/moditect/module-info.java similarity index 61% rename from incubator/validator-json/src/main/moditect/module-info.java rename to incubator/model-json/src/main/moditect/module-info.java index 3931d8cd17..3e7d30ffb7 100644 --- a/incubator/validator-json/src/main/moditect/module-info.java +++ b/incubator/model-json/src/main/moditect/module-info.java @@ -12,17 +12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -module io.aklivity.zilla.runtime.validator.json +module io.aklivity.zilla.runtime.model.json { requires io.aklivity.zilla.runtime.engine; requires org.leadpony.justify; - exports io.aklivity.zilla.runtime.validator.json.config; + exports io.aklivity.zilla.runtime.model.json.config; - provides io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi - with io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfigAdapter; + provides io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi + with io.aklivity.zilla.runtime.model.json.internal.config.JsonModelConfigAdapter; - provides io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi - with io.aklivity.zilla.runtime.validator.json.JsonValidatorFactory; + provides io.aklivity.zilla.runtime.engine.model.ModelFactorySpi + with io.aklivity.zilla.runtime.model.json.internal.JsonModelFactorySpi; } diff --git a/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi b/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi new file mode 100644 index 0000000000..5e6f55c3ba --- /dev/null +++ b/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.json.internal.config.JsonModelConfigAdapter diff --git a/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi b/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi new file mode 100644 index 0000000000..816e864ae6 --- /dev/null +++ b/incubator/model-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.json.internal.JsonModelFactorySpi diff --git a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorTest.java b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonConverterTest.java similarity index 61% rename from incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorTest.java rename to incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonConverterTest.java index b19be65beb..a5729989bd 100644 --- a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorTest.java +++ 
b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonConverterTest.java @@ -12,18 +12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.json; +package io.aklivity.zilla.runtime.model.json.internal; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import java.util.Properties; import java.util.function.LongFunction; -import java.util.function.ToLongFunction; import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; import org.junit.Before; import org.junit.Test; @@ -34,13 +33,12 @@ import io.aklivity.zilla.runtime.engine.catalog.CatalogContext; import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.config.CatalogConfig; -import io.aklivity.zilla.runtime.engine.internal.LabelManager; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalog; import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; -public class JsonValidatorTest +public class JsonConverterTest { private static final String OBJECT_SCHEMA = "{" + "\"type\": \"object\"," + @@ -65,7 +63,7 @@ public class JsonValidatorTest OBJECT_SCHEMA + "}"; - private final JsonValidatorConfig config = JsonValidatorConfig.builder() + private final JsonModelConfig config = JsonModelConfig.builder() .catalog() .name("test0") .schema() @@ -76,8 +74,6 @@ public class JsonValidatorTest .build() .build() .build(); - private LabelManager labels; - private ToLongFunction resolveId; private CatalogContext context; @Before @@ -86,8 +82,6 @@ public void init() Properties properties = new Properties(); properties.setProperty(ENGINE_DIRECTORY.name(), "target/zilla-itests"); EngineConfiguration config = new EngineConfiguration(properties); - labels = new LabelManager(config.directory()); - resolveId = name -> name != null ? 
NamespacedId.id(1, labels.supplyLabelId(name)) : 0L; Catalog catalog = new TestCatalog(config); context = catalog.supply(mock(EngineContext.class)); } @@ -95,27 +89,36 @@ public void init() @Test public void shouldVerifyValidJsonObject() { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(OBJECT_SCHEMA)); + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(OBJECT_SCHEMA) + .build()); LongFunction handler = value -> context.attach(catalogConfig); - JsonValidator validator = new JsonValidator(config, resolveId, handler); + JsonReadConverterHandler converter = new JsonReadConverterHandler(config, handler); DirectBuffer data = new UnsafeBuffer(); - String payload = "{" + - "\"id\": \"123\"," + - "\"status\": \"OK\"" + + String payload = + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + "}"; byte[] bytes = payload.getBytes(); data.wrap(bytes, 0, bytes.length); - assertTrue(validator.write(data, 0, data.capacity())); + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } @Test public void shouldVerifyValidJsonArray() { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(ARRAY_SCHEMA)); + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(ARRAY_SCHEMA) + .build()); LongFunction handler = value -> context.attach(catalogConfig); - JsonValidator validator = new JsonValidator(config, resolveId, handler); + JsonWriteConverterHandler converter = new JsonWriteConverterHandler(config, handler); DirectBuffer data = new UnsafeBuffer(); @@ -128,33 +131,72 @@ public void shouldVerifyValidJsonArray() "]"; byte[] bytes = payload.getBytes(); data.wrap(bytes, 0, bytes.length); - assertTrue(validator.write(data, 0, data.capacity())); + + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } @Test public void shouldVerifyInvalidJsonObject() { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(OBJECT_SCHEMA)); + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(OBJECT_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonReadConverterHandler converter = new JsonReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": 123," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + MutableDirectBuffer value = new UnsafeBuffer(new byte[data.capacity() + 5]); + value.putBytes(0, new byte[]{0x00, 0x00, 0x00, 0x00, 0x01}); + value.putBytes(5, bytes); + + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldWriteValidJsonData() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(OBJECT_SCHEMA) + .build()); LongFunction handler = value -> context.attach(catalogConfig); - JsonValidator validator = new JsonValidator(config, resolveId, handler); + JsonWriteConverterHandler converter = new JsonWriteConverterHandler(config, handler); DirectBuffer data = new UnsafeBuffer(); - String payload = "{" + - "\"id\": 123," + - "\"status\": \"OK\"" + + String 
payload = + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + "}"; byte[] bytes = payload.getBytes(); data.wrap(bytes, 0, bytes.length); - assertFalse(validator.write(data, 0, data.capacity())); + + assertEquals(data.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } @Test public void shouldVerifyInvalidJsonArray() { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(ARRAY_SCHEMA)); + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(ARRAY_SCHEMA) + .build()); LongFunction handler = value -> context.attach(catalogConfig); - JsonValidator validator = new JsonValidator(config, resolveId, handler); + JsonWriteConverterHandler converter = new JsonWriteConverterHandler(config, handler); DirectBuffer data = new UnsafeBuffer(); @@ -167,6 +209,7 @@ public void shouldVerifyInvalidJsonArray() "]"; byte[] bytes = payload.getBytes(); data.wrap(bytes, 0, bytes.length); - assertFalse(validator.write(data, 0, data.capacity())); + + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); } } diff --git a/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpiTest.java b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpiTest.java new file mode 100644 index 0000000000..5aa0afa399 --- /dev/null +++ b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonModelFactorySpiTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
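
Every test in this diff stubs the catalog the same way: a TestCatalogOptionsConfig built with an explicit id and inline schema text, attached through a LongFunction<CatalogHandler> so the handler under test resolves schemas without a live schema registry. The recurring wiring, condensed:

    CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test",
        TestCatalogOptionsConfig.builder()
            .id(9)
            .schema(OBJECT_SCHEMA)
            .build());
    LongFunction<CatalogHandler> supplyCatalog = id -> context.attach(catalogConfig);
    JsonReadConverterHandler converter = new JsonReadConverterHandler(config, supplyCatalog);
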
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonModelFactorySpiTest +{ + @Test + public void shouldLoadAndCreate() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("json", config); + + ModelContext context = model.supply(mock(EngineContext.class)); + + ModelConfig modelConfig = JsonModelConfig.builder() + .subject("test-value") + .catalog() + .name("test0") + .schema() + .subject("subject1") + .version("latest") + .build() + .build() + .build(); + + assertThat(model, instanceOf(JsonModel.class)); + assertEquals(model.name(), "json"); + assertThat(context.supplyReadConverterHandler(modelConfig), instanceOf(JsonReadConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(modelConfig), instanceOf(JsonWriteConverterHandler.class)); + assertThat(context.supplyValidatorHandler(modelConfig), instanceOf(JsonValidatorHandler.class)); + } +} diff --git a/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorTest.java b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorTest.java new file mode 100644 index 0000000000..12b3fd7aa9 --- /dev/null +++ b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/JsonValidatorTest.java @@ -0,0 +1,214 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
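
JsonModelContext fans one model out into three runtime roles, and the SPI test above pins each supply* call to its concrete handler. In use, that looks like the following sketch (engineContext is provided by the engine and mocked in the test):

    ModelContext context = model.supply(engineContext);
    ConverterHandler reader    = context.supplyReadConverterHandler(modelConfig);   // JsonReadConverterHandler
    ConverterHandler writer    = context.supplyWriteConverterHandler(modelConfig);  // JsonWriteConverterHandler
    ValidatorHandler validator = context.supplyValidatorHandler(modelConfig);       // JsonValidatorHandler

The complete-message validator tests that follow also call a four-argument validate without flags; that overload is assumed to be a ValidatorHandler default that treats the payload as complete (INIT and FIN both set), consistent with the fragmented variants passing flags explicitly.
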
+ */ +package io.aklivity.zilla.runtime.model.json.internal; + +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; +import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_FIN; +import static io.aklivity.zilla.runtime.engine.model.ValidatorHandler.FLAGS_INIT; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +import java.util.Properties; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.Catalog; +import io.aklivity.zilla.runtime.engine.catalog.CatalogContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogConfig; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalog; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonValidatorTest +{ + private static final String OBJECT_SCHEMA = "{" + + "\"type\": \"object\"," + + "\"properties\": " + + "{" + + "\"id\": {" + + "\"type\": \"string\"" + + "}," + + "\"status\": {" + + "\"type\": \"string\"" + + "}" + + "}," + + "\"required\": [" + + "\"id\"," + + "\"status\"" + + "]" + + "}"; + + private static final String ARRAY_SCHEMA = "{" + + "\"type\": \"array\"," + + "\"items\": " + + OBJECT_SCHEMA + + "}"; + + private final JsonModelConfig config = JsonModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .subject(null) + .version("latest") + .id(1) + .build() + .build() + .build(); + private CatalogContext context; + + @Before + public void init() + { + Properties properties = new Properties(); + properties.setProperty(ENGINE_DIRECTORY.name(), "target/zilla-itests"); + Configuration config = new Configuration(properties); + Catalog catalog = new TestCatalog(config); + context = catalog.supply(mock(EngineContext.class)); + } + + @Test + public void shouldVerifyValidCompleteJsonObject() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(OBJECT_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonValidatorHandler validator = new JsonValidatorHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + assertTrue(validator.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidCompleteJsonObject() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(OBJECT_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonValidatorHandler validator = new JsonValidatorHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": 123," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, 
bytes.length); + + assertFalse(validator.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyValidFragmentedJsonObject() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(OBJECT_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonValidatorHandler validator = new JsonValidatorHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + assertTrue(validator.validate(FLAGS_INIT, data, 0, 12, ValueConsumer.NOP)); + assertTrue(validator.validate(FLAGS_FIN, data, 12, data.capacity() - 12, ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyInvalidFragmentedJsonObject() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(OBJECT_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonValidatorHandler validator = new JsonValidatorHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "{" + + "\"id\": 123," + + "\"status\": \"OK\"" + + "}"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + assertTrue(validator.validate(FLAGS_INIT, data, 0, 12, ValueConsumer.NOP)); + assertFalse(validator.validate(FLAGS_FIN, data, 12, data.capacity() - 12, ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyValidJsonArray() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(ARRAY_SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + JsonValidatorHandler validator = new JsonValidatorHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String payload = + "[" + + "{" + + "\"id\": \"123\"," + + "\"status\": \"OK\"" + + "}" + + "]"; + byte[] bytes = payload.getBytes(); + data.wrap(bytes, 0, bytes.length); + + assertTrue(validator.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } +} diff --git a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapterTest.java b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapterTest.java similarity index 67% rename from incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapterTest.java rename to incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapterTest.java index 2c764b0e40..670b7748e2 100644 --- a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/config/JsonValidatorConfigAdapterTest.java +++ b/incubator/model-json/src/test/java/io/aklivity/zilla/runtime/model/json/internal/config/JsonModelConfigAdapterTest.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
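
The adapter test that follows locks in the new wire shape: the discriminator key is now model rather than type, with catalog (and an optional subject) alongside. Round-tripping through Jsonb with the adapter registered looks like:

    Jsonb jsonb = JsonbBuilder.create(new JsonbConfig().withAdapters(new JsonModelConfigAdapter()));
    String json = "{\"model\":\"json\",\"catalog\":{\"test0\":[{\"subject\":\"subject1\",\"version\":\"latest\"}]}}";
    JsonModelConfig config = (JsonModelConfig) jsonb.fromJson(json, JsonModelConfig.class);
    // config.model == "json"; config.cataloged carries the per-catalog schema references
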
*/ -package io.aklivity.zilla.runtime.validator.json.config; +package io.aklivity.zilla.runtime.model.json.internal.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +26,9 @@ import org.junit.Before; import org.junit.Test; -public class JsonValidatorConfigAdapterTest +import io.aklivity.zilla.runtime.model.json.config.JsonModelConfig; + +public class JsonModelConfigAdapterTest { private Jsonb jsonb; @@ -34,17 +36,17 @@ public class JsonValidatorConfigAdapterTest public void initJson() { JsonbConfig config = new JsonbConfig() - .withAdapters(new JsonValidatorConfigAdapter()); + .withAdapters(new JsonModelConfigAdapter()); jsonb = JsonbBuilder.create(config); } @Test - public void shouldReadJsonValidator() + public void shouldReadJsonConverter() { // GIVEN String json = "{" + - "\"type\": \"json\"," + + "\"model\": \"json\"," + "\"catalog\":" + "{" + "\"test0\":" + @@ -65,31 +67,31 @@ public void shouldReadJsonValidator() "}"; // WHEN - JsonValidatorConfig validator = jsonb.fromJson(json, JsonValidatorConfig.class); + JsonModelConfig config = jsonb.fromJson(json, JsonModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("json")); - assertThat(validator.catalogs.size(), equalTo(1)); - assertThat(validator.catalogs.get(0).name, equalTo("test0")); - assertThat(validator.catalogs.get(0).schemas.get(0).subject, equalTo("subject1")); - assertThat(validator.catalogs.get(0).schemas.get(0).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(0).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(1).strategy, equalTo("topic")); - assertThat(validator.catalogs.get(0).schemas.get(1).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(1).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(2).strategy, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).version, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).id, equalTo(42)); + assertThat(config, not(nullValue())); + assertThat(config.model, equalTo("json")); + assertThat(config.cataloged.size(), equalTo(1)); + assertThat(config.cataloged.get(0).name, equalTo("test0")); + assertThat(config.cataloged.get(0).schemas.get(0).subject, equalTo("subject1")); + assertThat(config.cataloged.get(0).schemas.get(0).version, equalTo("latest")); + assertThat(config.cataloged.get(0).schemas.get(0).id, equalTo(0)); + assertThat(config.cataloged.get(0).schemas.get(1).strategy, equalTo("topic")); + assertThat(config.cataloged.get(0).schemas.get(1).version, equalTo("latest")); + assertThat(config.cataloged.get(0).schemas.get(1).id, equalTo(0)); + assertThat(config.cataloged.get(0).schemas.get(2).strategy, nullValue()); + assertThat(config.cataloged.get(0).schemas.get(2).version, nullValue()); + assertThat(config.cataloged.get(0).schemas.get(2).id, equalTo(42)); } @Test - public void shouldWriteJsonValidator() + public void shouldWriteJsonConverter() { // GIVEN String expectedJson = "{" + - "\"type\":\"json\"," + + "\"model\":\"json\"," + "\"catalog\":" + "{" + "\"test0\":" + @@ -108,7 +110,7 @@ public void shouldWriteJsonValidator() "]" + "}" + "}"; - JsonValidatorConfig validator = JsonValidatorConfig.builder() + JsonModelConfig config = JsonModelConfig.builder() .catalog() .name("test0") .schema() @@ -126,7 +128,7 @@ public void shouldWriteJsonValidator() .build(); // WHEN - String json = jsonb.toJson(validator); + String json = 
jsonb.toJson(config); // THEN assertThat(json, not(nullValue())); diff --git a/incubator/model-protobuf.spec/COPYRIGHT b/incubator/model-protobuf.spec/COPYRIGHT new file mode 100644 index 0000000000..0cb10b6f62 --- /dev/null +++ b/incubator/model-protobuf.spec/COPYRIGHT @@ -0,0 +1,12 @@ +Copyright ${copyrightYears} Aklivity Inc + +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. diff --git a/incubator/model-protobuf.spec/LICENSE b/incubator/model-protobuf.spec/LICENSE new file mode 100644 index 0000000000..f6abb6327b --- /dev/null +++ b/incubator/model-protobuf.spec/LICENSE @@ -0,0 +1,114 @@ + Aklivity Community License Agreement + Version 1.0 + +This Aklivity Community License Agreement Version 1.0 (the “Agreement”) sets +forth the terms on which Aklivity, Inc. (“Aklivity”) makes available certain +software made available by Aklivity under this Agreement (the “Software”). BY +INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF THE SOFTWARE, +YOU AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE TO +SUCH TERMS AND CONDITIONS, YOU MUST NOT USE THE SOFTWARE. IF YOU ARE RECEIVING +THE SOFTWARE ON BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU +HAVE THE ACTUAL AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS +AGREEMENT ON BEHALF OF SUCH ENTITY. “Licensee” means you, an individual, or +the entity on whose behalf you are receiving the Software. + + 1. LICENSE GRANT AND CONDITIONS. + + 1.1 License. Subject to the terms and conditions of this Agreement, + Aklivity hereby grants to Licensee a non-exclusive, royalty-free, + worldwide, non-transferable, non-sublicenseable license during the term + of this Agreement to: (a) use the Software; (b) prepare modifications and + derivative works of the Software; (c) distribute the Software (including + without limitation in source code or object code form); and (d) reproduce + copies of the Software (the “License”). Licensee is not granted the + right to, and Licensee shall not, exercise the License for an Excluded + Purpose. For purposes of this Agreement, “Excluded Purpose” means making + available any software-as-a-service, platform-as-a-service, + infrastructure-as-a-service or other similar online service that competes + with Aklivity products or services that provide the Software. + + 1.2 Conditions. In consideration of the License, Licensee’s distribution + of the Software is subject to the following conditions: + + (a) Licensee must cause any Software modified by Licensee to carry + prominent notices stating that Licensee modified the Software. + + (b) On each Software copy, Licensee shall reproduce and not remove or + alter all Aklivity or third party copyright or other proprietary + notices contained in the Software, and Licensee must provide the + notice below with each copy. + + “This software is made available by Aklivity, Inc., under the + terms of the Aklivity Community License Agreement, Version 1.0 + located at http://www.Aklivity.io/Aklivity-community-license. 
BY + INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF + THE SOFTWARE, YOU AGREE TO THE TERMS OF SUCH LICENSE AGREEMENT.” + + 1.3 Licensee Modifications. Licensee may add its own copyright notices + to modifications made by Licensee and may provide additional or different + license terms and conditions for use, reproduction, or distribution of + Licensee’s modifications. While redistributing the Software or + modifications thereof, Licensee may choose to offer, for a fee or free of + charge, support, warranty, indemnity, or other obligations. Licensee, and + not Aklivity, will be responsible for any such obligations. + + 1.4 No Sublicensing. The License does not include the right to + sublicense the Software, however, each recipient to which Licensee + provides the Software may exercise the Licenses so long as such recipient + agrees to the terms and conditions of this Agreement. + + 2. TERM AND TERMINATION. This Agreement will continue unless and until + earlier terminated as set forth herein. If Licensee breaches any of its + conditions or obligations under this Agreement, this Agreement will + terminate automatically and the License will terminate automatically and + permanently. + + 3. INTELLECTUAL PROPERTY. As between the parties, Aklivity will retain all + right, title, and interest in the Software, and all intellectual property + rights therein. Aklivity hereby reserves all rights not expressly granted + to Licensee in this Agreement. Aklivity hereby reserves all rights in its + trademarks and service marks, and no licenses therein are granted in this + Agreement. + + 4. DISCLAIMER. Aklivity HEREBY DISCLAIMS ANY AND ALL WARRANTIES AND + CONDITIONS, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, AND SPECIFICALLY + DISCLAIMS ANY WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR + PURPOSE, WITH RESPECT TO THE SOFTWARE. + + 5. LIMITATION OF LIABILITY. Aklivity WILL NOT BE LIABLE FOR ANY DAMAGES OF + ANY KIND, INCLUDING BUT NOT LIMITED TO, LOST PROFITS OR ANY CONSEQUENTIAL, + SPECIAL, INCIDENTAL, INDIRECT, OR DIRECT DAMAGES, HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, ARISING OUT OF THIS AGREEMENT. THE FOREGOING SHALL + APPLY TO THE EXTENT PERMITTED BY APPLICABLE LAW. + + 6.GENERAL. + + 6.1 Governing Law. This Agreement will be governed by and interpreted in + accordance with the laws of the state of California, without reference to + its conflict of laws principles. If Licensee is located within the + United States, all disputes arising out of this Agreement are subject to + the exclusive jurisdiction of courts located in Santa Clara County, + California. USA. If Licensee is located outside of the United States, + any dispute, controversy or claim arising out of or relating to this + Agreement will be referred to and finally determined by arbitration in + accordance with the JAMS International Arbitration Rules. The tribunal + will consist of one arbitrator. The place of arbitration will be Palo + Alto, California. The language to be used in the arbitral proceedings + will be English. Judgment upon the award rendered by the arbitrator may + be entered in any court having jurisdiction thereof. + + 6.2 Assignment. Licensee is not authorized to assign its rights under + this Agreement to any third party. Aklivity may freely assign its rights + under this Agreement to any third party. + + 6.3 Other. This Agreement is the entire agreement between the parties + regarding the subject matter hereof. 
No amendment or modification of + this Agreement will be valid or binding upon the parties unless made in + writing and signed by the duly authorized representatives of both + parties. In the event that any provision, including without limitation + any condition, of this Agreement is held to be unenforceable, this + Agreement and all licenses and rights granted hereunder will immediately + terminate. Waiver by Aklivity of a breach of any provision of this + Agreement or the failure by Aklivity to exercise any right hereunder + will not be construed as a waiver of any subsequent breach of that right + or as a waiver of any other right. \ No newline at end of file diff --git a/incubator/model-protobuf.spec/NOTICE b/incubator/model-protobuf.spec/NOTICE new file mode 100644 index 0000000000..ed4c502c75 --- /dev/null +++ b/incubator/model-protobuf.spec/NOTICE @@ -0,0 +1,23 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +This project includes: + agrona under The Apache License, Version 2.0 + ANTLR 4 Runtime under BSD-3-Clause + ICU4J under Unicode/ICU License + Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception + Java Unified Expression Language API under The Apache Software License, Version 2.0 + Java Unified Expression Language Implementation under The Apache Software License, Version 2.0 + k3po/lang under The Apache Software License, Version 2.0 + Kaazing Corporation License under The Apache Software License, Version 2.0 + org.leadpony.justify under The Apache Software License, Version 2.0 + zilla::specs::engine.spec under The Apache Software License, Version 2.0 + diff --git a/incubator/model-protobuf.spec/NOTICE.template b/incubator/model-protobuf.spec/NOTICE.template new file mode 100644 index 0000000000..209ca12f74 --- /dev/null +++ b/incubator/model-protobuf.spec/NOTICE.template @@ -0,0 +1,13 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +This project includes: +#GENERATED_NOTICES# diff --git a/incubator/model-protobuf.spec/mvnw b/incubator/model-protobuf.spec/mvnw new file mode 100755 index 0000000000..d2f0ea3808 --- /dev/null +++ b/incubator/model-protobuf.spec/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... 
using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/incubator/model-protobuf.spec/mvnw.cmd b/incubator/model-protobuf.spec/mvnw.cmd new file mode 100644 index 0000000000..b26ab24f03 --- /dev/null +++ b/incubator/model-protobuf.spec/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/incubator/model-protobuf.spec/pom.xml b/incubator/model-protobuf.spec/pom.xml new file mode 100644 index 0000000000..6fed03b03c --- /dev/null +++ b/incubator/model-protobuf.spec/pom.xml @@ -0,0 +1,111 @@ + + + +4.0.0 + + io.aklivity.zilla + incubator + 0.9.67 + ../pom.xml + + +model-protobuf.spec +zilla::incubator::model-protobuf.spec + + + + Aklivity Community License Agreement + https://www.aklivity.io/aklivity-community-license/ + repo + + + + + 11 + 11 + 0.98 + 0 + + + + + ${project.groupId} + engine.spec + ${project.version} + + + junit + junit + test + + + org.hamcrest + hamcrest-library + test + + + + + + + src/main/resources + + + src/main/scripts + + + + + + org.jasig.maven + maven-notice-plugin + + + com.mycila + license-maven-plugin + + + maven-checkstyle-plugin + + + org.apache.maven.plugins + maven-compiler-plugin + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.moditect + moditect-maven-plugin + + + org.jacoco + jacoco-maven-plugin + + + + BUNDLE + + + INSTRUCTION + COVEREDRATIO + ${jacoco.coverage.ratio} + + + CLASS + MISSEDCOUNT + ${jacoco.missed.count} + + + + + + + + + + \ No newline at end of file diff --git a/incubator/model-protobuf.spec/src/main/moditect/module-info.java b/incubator/model-protobuf.spec/src/main/moditect/module-info.java new file mode 100644 index 0000000000..92a482d255 --- /dev/null +++ b/incubator/model-protobuf.spec/src/main/moditect/module-info.java @@ -0,0 +1,18 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +open module io.aklivity.zilla.specs.model.protobuf +{ + requires transitive io.aklivity.zilla.specs.engine; +} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt b/incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/config/model.yaml similarity index 52% rename from specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt rename to incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/config/model.yaml index 0d654c898a..cb958f494c 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt +++ b/incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/config/model.yaml @@ -13,20 +13,30 @@ # specific language governing permissions and limitations under the License. # -accept "zilla://streams/mqtt0" - option zilla:window 8192 - option zilla:transmission "duplex" -accepted - -read zilla:begin.ext ${mqtt:matchBeginEx() - .typeId(zilla:id("mqtt")) - .publish() - .clientId("client") - .topic("sensor/one") - .flags("RETAIN") - .build() - .build()} - -connected - -write advise zilla:flush +--- +name: test +catalogs: + test0: + type: test + options: + schema: | + syntax = "proto3"; + message example + { + string id = 1; + string status = 2; + } +bindings: + test: + kind: server + type: test + options: + value: + model: protobuf + view: json + catalog: + catalog0: + - subject: test0 + version: latest + record: example + exit: test diff --git a/incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/schema/protobuf.schema.patch.json b/incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/schema/protobuf.schema.patch.json new file mode 100644 index 0000000000..578800f5d2 --- /dev/null +++ b/incubator/model-protobuf.spec/src/main/scripts/io/aklivity/zilla/specs/model/protobuf/schema/protobuf.schema.patch.json @@ -0,0 +1,152 @@ +[ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "protobuf" + }, + { + "op": "add", + "path": "/$defs/converter/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "protobuf" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "protobuf" + }, + "view": + { + "type": "string", + "enum": + [ + "json" + ] + }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + }, + "record": + { + "type": "string" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + }, + "record": + { + "type": "string" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + }, + "record": + { + "type": "string" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" 
+ }, + "version": + { + "type": "string", + "default": "latest" + }, + "record": + { + "type": "string" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + } + } + } +] diff --git a/incubator/model-protobuf.spec/src/test/java/io/aklivity/zilla/specs/model/protobuf/config/SchemaTest.java b/incubator/model-protobuf.spec/src/test/java/io/aklivity/zilla/specs/model/protobuf/config/SchemaTest.java new file mode 100644 index 0000000000..53b15a4b87 --- /dev/null +++ b/incubator/model-protobuf.spec/src/test/java/io/aklivity/zilla/specs/model/protobuf/config/SchemaTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.specs.model.protobuf.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.JsonObject; + +import org.junit.Rule; +import org.junit.Test; + +import io.aklivity.zilla.specs.engine.config.ConfigSchemaRule; + +public class SchemaTest +{ + @Rule + public final ConfigSchemaRule schema = new ConfigSchemaRule() + .schemaPatch("io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/model/protobuf/schema/protobuf.schema.patch.json") + .configurationRoot("io/aklivity/zilla/specs/model/protobuf/config"); + + @Test + public void shouldValidateCatalog() + { + JsonObject config = schema.validate("model.yaml"); + + assertThat(config, not(nullValue())); + } +} diff --git a/incubator/model-protobuf/COPYRIGHT b/incubator/model-protobuf/COPYRIGHT new file mode 100644 index 0000000000..0cb10b6f62 --- /dev/null +++ b/incubator/model-protobuf/COPYRIGHT @@ -0,0 +1,12 @@ +Copyright ${copyrightYears} Aklivity Inc + +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. diff --git a/incubator/model-protobuf/LICENSE b/incubator/model-protobuf/LICENSE new file mode 100644 index 0000000000..f6abb6327b --- /dev/null +++ b/incubator/model-protobuf/LICENSE @@ -0,0 +1,114 @@ + Aklivity Community License Agreement + Version 1.0 + +This Aklivity Community License Agreement Version 1.0 (the “Agreement”) sets +forth the terms on which Aklivity, Inc. (“Aklivity”) makes available certain +software made available by Aklivity under this Agreement (the “Software”). 
BY +INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF THE SOFTWARE, +YOU AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE TO +SUCH TERMS AND CONDITIONS, YOU MUST NOT USE THE SOFTWARE. IF YOU ARE RECEIVING +THE SOFTWARE ON BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU +HAVE THE ACTUAL AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS +AGREEMENT ON BEHALF OF SUCH ENTITY. “Licensee” means you, an individual, or +the entity on whose behalf you are receiving the Software. + + 1. LICENSE GRANT AND CONDITIONS. + + 1.1 License. Subject to the terms and conditions of this Agreement, + Aklivity hereby grants to Licensee a non-exclusive, royalty-free, + worldwide, non-transferable, non-sublicenseable license during the term + of this Agreement to: (a) use the Software; (b) prepare modifications and + derivative works of the Software; (c) distribute the Software (including + without limitation in source code or object code form); and (d) reproduce + copies of the Software (the “License”). Licensee is not granted the + right to, and Licensee shall not, exercise the License for an Excluded + Purpose. For purposes of this Agreement, “Excluded Purpose” means making + available any software-as-a-service, platform-as-a-service, + infrastructure-as-a-service or other similar online service that competes + with Aklivity products or services that provide the Software. + + 1.2 Conditions. In consideration of the License, Licensee’s distribution + of the Software is subject to the following conditions: + + (a) Licensee must cause any Software modified by Licensee to carry + prominent notices stating that Licensee modified the Software. + + (b) On each Software copy, Licensee shall reproduce and not remove or + alter all Aklivity or third party copyright or other proprietary + notices contained in the Software, and Licensee must provide the + notice below with each copy. + + “This software is made available by Aklivity, Inc., under the + terms of the Aklivity Community License Agreement, Version 1.0 + located at http://www.Aklivity.io/Aklivity-community-license. BY + INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF + THE SOFTWARE, YOU AGREE TO THE TERMS OF SUCH LICENSE AGREEMENT.” + + 1.3 Licensee Modifications. Licensee may add its own copyright notices + to modifications made by Licensee and may provide additional or different + license terms and conditions for use, reproduction, or distribution of + Licensee’s modifications. While redistributing the Software or + modifications thereof, Licensee may choose to offer, for a fee or free of + charge, support, warranty, indemnity, or other obligations. Licensee, and + not Aklivity, will be responsible for any such obligations. + + 1.4 No Sublicensing. The License does not include the right to + sublicense the Software, however, each recipient to which Licensee + provides the Software may exercise the Licenses so long as such recipient + agrees to the terms and conditions of this Agreement. + + 2. TERM AND TERMINATION. This Agreement will continue unless and until + earlier terminated as set forth herein. If Licensee breaches any of its + conditions or obligations under this Agreement, this Agreement will + terminate automatically and the License will terminate automatically and + permanently. + + 3. INTELLECTUAL PROPERTY. As between the parties, Aklivity will retain all + right, title, and interest in the Software, and all intellectual property + rights therein. 
Aklivity hereby reserves all rights not expressly granted + to Licensee in this Agreement. Aklivity hereby reserves all rights in its + trademarks and service marks, and no licenses therein are granted in this + Agreement. + + 4. DISCLAIMER. Aklivity HEREBY DISCLAIMS ANY AND ALL WARRANTIES AND + CONDITIONS, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, AND SPECIFICALLY + DISCLAIMS ANY WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR + PURPOSE, WITH RESPECT TO THE SOFTWARE. + + 5. LIMITATION OF LIABILITY. Aklivity WILL NOT BE LIABLE FOR ANY DAMAGES OF + ANY KIND, INCLUDING BUT NOT LIMITED TO, LOST PROFITS OR ANY CONSEQUENTIAL, + SPECIAL, INCIDENTAL, INDIRECT, OR DIRECT DAMAGES, HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, ARISING OUT OF THIS AGREEMENT. THE FOREGOING SHALL + APPLY TO THE EXTENT PERMITTED BY APPLICABLE LAW. + + 6.GENERAL. + + 6.1 Governing Law. This Agreement will be governed by and interpreted in + accordance with the laws of the state of California, without reference to + its conflict of laws principles. If Licensee is located within the + United States, all disputes arising out of this Agreement are subject to + the exclusive jurisdiction of courts located in Santa Clara County, + California. USA. If Licensee is located outside of the United States, + any dispute, controversy or claim arising out of or relating to this + Agreement will be referred to and finally determined by arbitration in + accordance with the JAMS International Arbitration Rules. The tribunal + will consist of one arbitrator. The place of arbitration will be Palo + Alto, California. The language to be used in the arbitral proceedings + will be English. Judgment upon the award rendered by the arbitrator may + be entered in any court having jurisdiction thereof. + + 6.2 Assignment. Licensee is not authorized to assign its rights under + this Agreement to any third party. Aklivity may freely assign its rights + under this Agreement to any third party. + + 6.3 Other. This Agreement is the entire agreement between the parties + regarding the subject matter hereof. No amendment or modification of + this Agreement will be valid or binding upon the parties unless made in + writing and signed by the duly authorized representatives of both + parties. In the event that any provision, including without limitation + any condition, of this Agreement is held to be unenforceable, this + Agreement and all licenses and rights granted hereunder will immediately + terminate. Waiver by Aklivity of a breach of any provision of this + Agreement or the failure by Aklivity to exercise any right hereunder + will not be construed as a waiver of any subsequent breach of that right + or as a waiver of any other right. \ No newline at end of file diff --git a/incubator/model-protobuf/NOTICE b/incubator/model-protobuf/NOTICE new file mode 100644 index 0000000000..aa95b451ff --- /dev/null +++ b/incubator/model-protobuf/NOTICE @@ -0,0 +1,23 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+ +This project includes: + error-prone annotations under Apache 2.0 + FindBugs-jsr305 under The Apache Software License, Version 2.0 + Gson under Apache-2.0 + Guava: Google Core Libraries for Java under Apache License, Version 2.0 + J2ObjC Annotations under Apache License, Version 2.0 + Protocol Buffers [Core] under BSD-3-Clause + Protocol Buffers [Util] under BSD-3-Clause + + +This project also includes code under copyright of the following entities: + https://github.com/reaktivity/ diff --git a/incubator/model-protobuf/NOTICE.template b/incubator/model-protobuf/NOTICE.template new file mode 100644 index 0000000000..ff901de01b --- /dev/null +++ b/incubator/model-protobuf/NOTICE.template @@ -0,0 +1,16 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +This project includes: +#GENERATED_NOTICES# + +This project also includes code under copyright of the following entities: + https://github.com/reaktivity/ \ No newline at end of file diff --git a/incubator/model-protobuf/mvnw b/incubator/model-protobuf/mvnw new file mode 100755 index 0000000000..d2f0ea3808 --- /dev/null +++ b/incubator/model-protobuf/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
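+# (Editorial note, not part of the upstream wrapper script: a usage sketch.
+# MVNW_VERBOSE and MVNW_REPOURL are read further down in this script; the
+# mirror URL shown here is hypothetical.
+#
+#   MVNW_VERBOSE=true ./mvnw -version
+#   MVNW_REPOURL=https://repo.example.local/maven2 ./mvnw clean verify
+# )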
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." 
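+  # (Editorial note, not part of the upstream script: if the warning above is
+  # printed, JAVA_HOME can be exported before re-running the wrapper, e.g.
+  # assuming a hypothetical OpenJDK location:
+  #
+  #   export JAVA_HOME=/usr/lib/jvm/java-17-openjdk
+  # )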
+fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." 
+ fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/incubator/model-protobuf/mvnw.cmd b/incubator/model-protobuf/mvnw.cmd new file mode 100644 index 0000000000..b26ab24f03 --- /dev/null +++ b/incubator/model-protobuf/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. 
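+@REM (Editorial note, not part of the upstream script: the FOR loop above reads
+@REM wrapperUrl from .mvn\wrapper\maven-wrapper.properties, so a project can pin
+@REM its own download location; the mirror host below is hypothetical:
+@REM
+@REM   wrapperUrl=https://repo.example.local/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar
+@REM )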
+if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. +set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/incubator/model-protobuf/pom.xml b/incubator/model-protobuf/pom.xml new file mode 100644 index 0000000000..042d5e6727 --- /dev/null +++ b/incubator/model-protobuf/pom.xml @@ -0,0 +1,209 @@ + + + +4.0.0 + + io.aklivity.zilla + incubator + 0.9.67 + ../pom.xml + + +model-protobuf +zilla::incubator::model-protobuf + + + + Aklivity Community License Agreement + https://www.aklivity.io/aklivity-community-license/ + repo + + + + + 11 + 11 + 0.90 + 0 + + + + + ${project.groupId} + model-protobuf.spec + ${project.version} + provided + + + ${project.groupId} + engine + ${project.version} + provided + + + com.google.protobuf + protobuf-java + 3.24.4 + + + com.google.protobuf + protobuf-java-util + 3.24.4 + + + org.antlr + antlr4-runtime + provided + + + ${project.groupId} + engine + test-jar + ${project.version} + test + + + org.kaazing + k3po.junit + test + + + org.kaazing + k3po.lang + test + + + org.mockito + mockito-core + test + + + + + + + org.jasig.maven + maven-notice-plugin + + + com.mycila + license-maven-plugin + + + maven-checkstyle-plugin + + + org.antlr + antlr4-maven-plugin + + + maven-dependency-plugin + + + process-resources + + unpack + + + + + ${project.groupId} + model-protobuf.spec + + + ^\Qio/aklivity/zilla/specs/model/protobuf/\E + io/aklivity/zilla/runtime/model/protobuf/internal/ + + + + + io/aklivity/zilla/specs/model/protobuf/schema/protobuf.schema.patch.json + ${project.build.directory}/classes + + + + unpack-proto + generate-sources + + unpack + + + + + ${project.groupId} + model-protobuf.spec + ${project.version} + ${basedir}/target/test-classes + **\/*.proto + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + 
org.apache.maven.plugins + maven-surefire-plugin + + + org.moditect + moditect-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + + + org.jacoco + jacoco-maven-plugin + + + io/aklivity/zilla/runtime/model/protobuf/internal/parser/**/*.class + + + + BUNDLE + + + INSTRUCTION + COVEREDRATIO + ${jacoco.coverage.ratio} + + + CLASS + MISSEDCOUNT + ${jacoco.missed.count} + + + + + + + + org.kaazing + k3po-maven-plugin + + + ${project.groupId} + engine + ${project.version} + test-jar + + + ${project.groupId} + engine + ${project.version} + + + + + + + \ No newline at end of file diff --git a/incubator/model-protobuf/src/main/antlr4/io/aklivity/zilla/runtime/model/protobuf/internal/parser/Protobuf3.g4 b/incubator/model-protobuf/src/main/antlr4/io/aklivity/zilla/runtime/model/protobuf/internal/parser/Protobuf3.g4 new file mode 100644 index 0000000000..ce9835dbee --- /dev/null +++ b/incubator/model-protobuf/src/main/antlr4/io/aklivity/zilla/runtime/model/protobuf/internal/parser/Protobuf3.g4 @@ -0,0 +1,400 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +grammar Protobuf3; + +proto + : syntax + ( + importStatement + | packageStatement + | optionStatement + | topLevelDef + | emptyStatement_ + )* EOF + ; + +// Syntax + +syntax + : SYNTAX EQ (PROTO3_LIT_SINGLE | PROTO3_LIT_DOUBLE) SEMI + ; + +// Import Statement + +importStatement + : IMPORT ( WEAK | PUBLIC )? strLit SEMI + ; + +// Package + +packageStatement + : PACKAGE fullIdent SEMI + ; + +// Option + +optionStatement + : OPTION optionName EQ constant SEMI + ; + +optionName + : fullIdent + | LP fullIdent RP ( DOT fullIdent )? + ; + +// Normal Field +fieldLabel + : OPTIONAL | REPEATED + ; + +field + : fieldLabel? type_ fieldName EQ fieldNumber ( LB fieldOptions RB )? SEMI + ; + +fieldOptions + : fieldOption ( COMMA fieldOption )* + ; + +fieldOption + : optionName EQ constant + ; + +fieldNumber + : intLit + ; + +// Oneof and oneof field + +oneof + : ONEOF oneofName LC ( optionStatement | oneofField | emptyStatement_ )* RC + ; + +oneofField + : type_ fieldName EQ fieldNumber ( LB fieldOptions RB )? SEMI + ; + +// Map field + +mapField + : MAP LT keyType COMMA type_ GT mapName + EQ fieldNumber ( LB fieldOptions RB )? SEMI + ; +keyType + : INT32 + | INT64 + | UINT32 + | UINT64 + | SINT32 + | SINT64 + | FIXED32 + | FIXED64 + | SFIXED32 + | SFIXED64 + | BOOL + | STRING + ; + +// field types + +type_ + : DOUBLE + | FLOAT + | INT32 + | INT64 + | UINT32 + | UINT64 + | SINT32 + | SINT64 + | FIXED32 + | FIXED64 + | SFIXED32 + | SFIXED64 + | BOOL + | STRING + | BYTES + | messageType + | enumType + ; + +// Reserved + +reserved + : RESERVED ( ranges | reservedFieldNames ) SEMI + ; + +ranges + : range_ ( COMMA range_ )* + ; + +range_ + : intLit ( TO ( intLit | MAX ) )? 
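+ // (Editorial note, not part of the upstream grammar: illustrative proto3
+ // statements matched by the reserved / ranges / range_ rules here --
+ //   reserved 2, 15, 9 to 11;
+ //   reserved "foo", "bar";
+ // the second form is matched through reservedFieldNames below.)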
+ ;
+
+reservedFieldNames
+ : strLit ( COMMA strLit )*
+ ;
+
+// Top Level definitions
+
+topLevelDef
+ : messageDef
+ | enumDef
+ | extendDef
+ | serviceDef
+ ;
+
+// enum
+
+enumDef
+ : ENUM enumName enumBody
+ ;
+
+enumBody
+ : LC enumElement* RC
+ ;
+
+enumElement
+ : optionStatement
+ | enumField
+ | emptyStatement_
+ ;
+
+enumField
+ : ident EQ ( MINUS )? intLit enumValueOptions? SEMI
+ ;
+
+enumValueOptions
+ : LB enumValueOption ( COMMA enumValueOption )* RB
+ ;
+
+enumValueOption
+ : optionName EQ constant
+ ;
+
+// message
+
+messageDef
+ : MESSAGE messageName messageBody
+ ;
+
+messageBody
+ : LC messageElement* RC
+ ;
+
+messageElement
+ : field
+ | enumDef
+ | messageDef
+ | extendDef
+ | optionStatement
+ | oneof
+ | mapField
+ | reserved
+ | emptyStatement_
+ ;
+
+// Extend definition
+//
+// NB: not defined in the spec but supported by protoc and covered by protobuf3 tests
+// see e.g. php/tests/proto/test_import_descriptor_proto.proto
+// of https://github.com/protocolbuffers/protobuf
+// it also was discussed here: https://github.com/protocolbuffers/protobuf/issues/4610
+
+extendDef
+ : EXTEND messageType LC ( field
+ | emptyStatement_
+ )* RC
+ ;
+
+// service
+
+serviceDef
+ : SERVICE serviceName LC serviceElement* RC
+ ;
+
+serviceElement
+ : optionStatement
+ | rpc
+ | emptyStatement_
+ ;
+
+rpc
+ : RPC rpcName LP ( clientStreaming=STREAM )? messageType RP
+ RETURNS LP ( serverStreaming=STREAM )? messageType RP
+ (LC ( optionStatement | emptyStatement_ )* RC | SEMI)
+ ;
+
+// lexical
+
+constant
+ : fullIdent
+ | ( MINUS | PLUS )? intLit
+ | ( MINUS | PLUS )? floatLit
+ | strLit
+ | boolLit
+ | blockLit
+ ;
+
+// not specified in specification but used in tests
+blockLit
+ : LC ( ident COLON constant )* RC
+ ;
+
+emptyStatement_: SEMI;
+
+// Lexical elements
+
+ident: IDENTIFIER | keywords;
+fullIdent: ident ( DOT ident )*;
+messageName: ident;
+enumName: ident;
+fieldName: ident;
+oneofName: ident;
+mapName: ident;
+serviceName: ident;
+rpcName: ident;
+messageType: ( DOT )? ( ident DOT )* messageName;
+enumType: ( DOT )? ( ident DOT )* enumName;
+
+intLit: INT_LIT;
+strLit: STR_LIT | PROTO3_LIT_SINGLE | PROTO3_LIT_DOUBLE;
+boolLit: BOOL_LIT;
+floatLit: FLOAT_LIT;
+
+// keywords
+SYNTAX: 'syntax';
+IMPORT: 'import';
+WEAK: 'weak';
+PUBLIC: 'public';
+PACKAGE: 'package';
+OPTION: 'option';
+OPTIONAL: 'optional';
+REPEATED: 'repeated';
+ONEOF: 'oneof';
+MAP: 'map';
+INT32: 'int32';
+INT64: 'int64';
+UINT32: 'uint32';
+UINT64: 'uint64';
+SINT32: 'sint32';
+SINT64: 'sint64';
+FIXED32: 'fixed32';
+FIXED64: 'fixed64';
+SFIXED32: 'sfixed32';
+SFIXED64: 'sfixed64';
+BOOL: 'bool';
+STRING: 'string';
+DOUBLE: 'double';
+FLOAT: 'float';
+BYTES: 'bytes';
+RESERVED: 'reserved';
+TO: 'to';
+MAX: 'max';
+ENUM: 'enum';
+MESSAGE: 'message';
+SERVICE: 'service';
+EXTEND: 'extend';
+RPC: 'rpc';
+STREAM: 'stream';
+RETURNS: 'returns';
+
+PROTO3_LIT_SINGLE: '"proto3"';
+PROTO3_LIT_DOUBLE: '\'proto3\'';
+
+// symbols
+
+SEMI: ';';
+EQ: '=';
+LP: '(';
+RP: ')';
+LB: '[';
+RB: ']';
+LC: '{';
+RC: '}';
+LT: '<';
+GT: '>';
+DOT: '.';
+COMMA: ',';
+COLON: ':';
+PLUS: '+';
+MINUS: '-';
+
+STR_LIT: ( '\'' ( CHAR_VALUE )*? '\'' ) | ( '"' ( CHAR_VALUE )*?
'"' ); +fragment CHAR_VALUE: HEX_ESCAPE | OCT_ESCAPE | CHAR_ESCAPE | ~[\u0000\n\\]; +fragment HEX_ESCAPE: '\\' ( 'x' | 'X' ) HEX_DIGIT HEX_DIGIT; +fragment OCT_ESCAPE: '\\' OCTAL_DIGIT OCTAL_DIGIT OCTAL_DIGIT; +fragment CHAR_ESCAPE: '\\' ( 'a' | 'b' | 'f' | 'n' | 'r' | 't' | 'v' | '\\' | '\'' | '"' ); + +BOOL_LIT: 'true' | 'false'; + +FLOAT_LIT : ( DECIMALS DOT DECIMALS? EXPONENT? | DECIMALS EXPONENT | DOT DECIMALS EXPONENT? ) | 'inf' | 'nan'; +fragment EXPONENT : ( 'e' | 'E' ) (PLUS | MINUS)? DECIMALS; +fragment DECIMALS : DECIMAL_DIGIT+; + +INT_LIT : DECIMAL_LIT | OCTAL_LIT | HEX_LIT; +fragment DECIMAL_LIT : ( [1-9] ) DECIMAL_DIGIT*; +fragment OCTAL_LIT : '0' OCTAL_DIGIT*; +fragment HEX_LIT : '0' ( 'x' | 'X' ) HEX_DIGIT+ ; + +IDENTIFIER: LETTER ( LETTER | DECIMAL_DIGIT )*; + +fragment LETTER: [A-Za-z_]; +fragment DECIMAL_DIGIT: [0-9]; +fragment OCTAL_DIGIT: [0-7]; +fragment HEX_DIGIT: [0-9A-Fa-f]; + +// comments +WS : [ \t\r\n\u000C]+ -> skip; +LINE_COMMENT: '//' ~[\r\n]* -> channel(HIDDEN); +COMMENT: '/*' .*? '*/' -> channel(HIDDEN); + +keywords + : SYNTAX + | IMPORT + | WEAK + | PUBLIC + | PACKAGE + | OPTION + | OPTIONAL + | REPEATED + | ONEOF + | MAP + | INT32 + | INT64 + | UINT32 + | UINT64 + | SINT32 + | SINT64 + | FIXED32 + | FIXED64 + | SFIXED32 + | SFIXED64 + | BOOL + | STRING + | DOUBLE + | FLOAT + | BYTES + | RESERVED + | TO + | MAX + | ENUM + | MESSAGE + | SERVICE + | EXTEND + | RPC + | STREAM + | RETURNS + | BOOL_LIT + ; diff --git a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfig.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfig.java similarity index 55% rename from incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfig.java rename to incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfig.java index 54ced3bb20..35da0ec2b4 100644 --- a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfig.java +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfig.java @@ -12,36 +12,37 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
 */
-package io.aklivity.zilla.runtime.validator.avro.config;
+package io.aklivity.zilla.runtime.model.protobuf.config;
 
 import java.util.List;
 import java.util.function.Function;
 
 import io.aklivity.zilla.runtime.engine.config.CatalogedConfig;
-import io.aklivity.zilla.runtime.engine.config.ValidatorConfig;
+import io.aklivity.zilla.runtime.engine.config.ModelConfig;
 
-public final class AvroValidatorConfig extends ValidatorConfig
+public final class ProtobufModelConfig extends ModelConfig
 {
-    public final List<CatalogedConfig> catalogs;
     public final String subject;
+    public final String view;
 
-    public AvroValidatorConfig(
-        List<CatalogedConfig> catalogs,
-        String subject)
+    public ProtobufModelConfig(
+        List<CatalogedConfig> cataloged,
+        String subject,
+        String view)
     {
-        super("avro");
-        this.catalogs = catalogs;
+        super("protobuf", cataloged);
         this.subject = subject;
+        this.view = view;
     }
 
-    public static <T> AvroValidatorConfigBuilder<T> builder(
-        Function<ValidatorConfig, T> mapper)
+    public static <T> ProtobufModelConfigBuilder<T> builder(
+        Function<ModelConfig, T> mapper)
     {
-        return new AvroValidatorConfigBuilder<>(mapper::apply);
+        return new ProtobufModelConfigBuilder<>(mapper::apply);
     }
 
-    public static AvroValidatorConfigBuilder<AvroValidatorConfig> builder()
+    public static ProtobufModelConfigBuilder<ProtobufModelConfig> builder()
     {
-        return new AvroValidatorConfigBuilder<>(AvroValidatorConfig.class::cast);
+        return new ProtobufModelConfigBuilder<>(ProtobufModelConfig.class::cast);
     }
 }
diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfigBuilder.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfigBuilder.java
new file mode 100644
index 0000000000..86a17c2ac7
--- /dev/null
+++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/config/ProtobufModelConfigBuilder.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ *   https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.model.protobuf.config;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.function.Function;
+
+import io.aklivity.zilla.runtime.engine.config.CatalogedConfig;
+import io.aklivity.zilla.runtime.engine.config.CatalogedConfigBuilder;
+import io.aklivity.zilla.runtime.engine.config.ConfigBuilder;
+import io.aklivity.zilla.runtime.engine.config.ModelConfig;
+
+public class ProtobufModelConfigBuilder<T> extends ConfigBuilder<T, ProtobufModelConfigBuilder<T>>
+{
+    private final Function<ModelConfig, T> mapper;
+
+    private List<CatalogedConfig> catalogs;
+    private String subject;
+    private String view;
+
+    ProtobufModelConfigBuilder(
+        Function<ModelConfig, T> mapper)
+    {
+        this.mapper = mapper;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    protected Class<ProtobufModelConfigBuilder<T>> thisType()
+    {
+        return (Class<ProtobufModelConfigBuilder<T>>) getClass();
+    }
+
+    public CatalogedConfigBuilder<ProtobufModelConfigBuilder<T>> catalog()
+    {
+        return CatalogedConfig.builder(this::catalog);
+    }
+
+    public ProtobufModelConfigBuilder<T> subject(
+        String subject)
+    {
+        this.subject = subject;
+        return this;
+    }
+
+    public ProtobufModelConfigBuilder<T> catalog(
+        CatalogedConfig catalog)
+    {
+        if (catalogs == null)
+        {
+            catalogs = new LinkedList<>();
+        }
+        catalogs.add(catalog);
+        return this;
+    }
+
+    public ProtobufModelConfigBuilder<T> view(
+        String view)
+    {
+        this.view = view;
+        return this;
+    }
+
+    @Override
+    public T build()
+    {
+        return mapper.apply(new ProtobufModelConfig(catalogs, subject, view));
+    }
+}
diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/DescriptorTree.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/DescriptorTree.java
new file mode 100644
index 0000000000..8b7a3cd3b8
--- /dev/null
+++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/DescriptorTree.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ *   https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.model.protobuf.internal;
+
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Descriptors.Descriptor;
+import com.google.protobuf.Descriptors.FileDescriptor;
+
+public class DescriptorTree
+{
+    protected final Map<String, DescriptorTree> children;
+    protected final List<Integer> indexes;
+
+    protected Descriptors.Descriptor descriptor;
+    protected String name;
+
+    private DescriptorTree()
+    {
+        this.children = new LinkedHashMap<>();
+        this.indexes = new LinkedList<>();
+    }
+
+    protected DescriptorTree(
+        FileDescriptor fileDescriptors)
+    {
+        this();
+        this.name = fileDescriptors.getPackage();
+        for (Descriptor descriptor : fileDescriptors.getMessageTypes())
+        {
+            addDescriptor(descriptor);
+            addNestedDescriptors(descriptor);
+        }
+    }
+
+    protected DescriptorTree findByName(
+        String path)
+    {
+        DescriptorTree current = this;
+        int start = 0;
+        int end;
+
+        while (start < path.length())
+        {
+            end = path.indexOf('.', start);
+            if (end == -1)
+            {
+                end = path.length();
+            }
+
+            String part = path.substring(start, end);
+            current = current.children.get(part);
+
+            if (current == null)
+            {
+                break;
+            }
+            start = end + 1;
+        }
+        return current;
+    }
+
+    protected Descriptor findByIndexes(
+        List<Integer> indexes)
+    {
+        DescriptorTree current = this;
+
+        for (Integer index : indexes)
+        {
+            current = current.findChild(index);
+            if (current == null)
+            {
+                break;
+            }
+        }
+        return current != null ? current.descriptor : null;
+    }
+
+    private DescriptorTree findParent(
+        String path)
+    {
+        int index = path.lastIndexOf('.');
+        String part = index >= 0 ? path.substring(index + 1) : path;
+        return this.children.getOrDefault(part, null);
+    }
+
+    private DescriptorTree findChild(
+        int index)
+    {
+        DescriptorTree tree = this;
+        int currentIndex = 0;
+        for (Map.Entry<String, DescriptorTree> entry : children.entrySet())
+        {
+            if (currentIndex == index)
+            {
+                tree = entry.getValue();
+                break;
+            }
+            currentIndex++;
+        }
+        return tree;
+    }
+
+    private void addNestedDescriptor(
+        Descriptor parent,
+        int index)
+    {
+        DescriptorTree parentNode = findParent(parent.getFullName());
+        if (parentNode != null)
+        {
+            Descriptors.Descriptor nestedDescriptor = parent.getNestedTypes().get(index);
+            parentNode.addDescriptor(nestedDescriptor);
+            parentNode.addNestedDescriptors(nestedDescriptor);
+        }
+    }
+
+    private void addDescriptor(
+        Descriptor descriptor)
+    {
+        DescriptorTree node = new DescriptorTree();
+        node.descriptor = descriptor;
+        node.name = name;
+        node.indexes.addAll(this.indexes);
+        node.indexes.add(this.children.size());
+        this.children.put(descriptor.getName(), node);
+    }
+
+    private void addNestedDescriptors(
+        Descriptor descriptor)
+    {
+        for (int i = 0; i < descriptor.getNestedTypes().size(); i++)
+        {
+            addNestedDescriptor(descriptor, i);
+        }
+    }
+}
diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtoListener.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtoListener.java
new file mode 100644
index 0000000000..420278e4fd
--- /dev/null
+++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtoListener.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ *   https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.model.protobuf.internal;
+
+import static java.util.Map.entry;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+
+import com.google.protobuf.DescriptorProtos;
+import com.google.protobuf.DescriptorProtos.DescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Label;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
+import com.google.protobuf.DescriptorProtos.FileDescriptorProto;
+
+import io.aklivity.zilla.runtime.model.protobuf.internal.parser.Protobuf3BaseListener;
+import io.aklivity.zilla.runtime.model.protobuf.internal.parser.Protobuf3Parser;
+
+public class ProtoListener extends Protobuf3BaseListener
+{
+    private static final Map<String, Type> TYPES = Map.ofEntries(
+        entry("double", Type.TYPE_DOUBLE),
+        entry("float", Type.TYPE_FLOAT),
+        entry("int32", Type.TYPE_INT32),
+        entry("int64", Type.TYPE_INT64),
+        entry("uint32", Type.TYPE_UINT32),
+        entry("uint64", Type.TYPE_UINT64),
+        entry("sint32", Type.TYPE_SINT32),
+        entry("sint64", Type.TYPE_SINT64),
+        entry("fixed32", Type.TYPE_FIXED32),
+        entry("fixed64", Type.TYPE_FIXED64),
+        entry("sfixed32", Type.TYPE_SFIXED32),
+        entry("sfixed64", Type.TYPE_SFIXED64),
+        entry("bool", Type.TYPE_BOOL),
+        entry("string", Type.TYPE_STRING),
+        entry("bytes", Type.TYPE_BYTES)
+    );
+
+    private static final Map<String, Label> LABELS = Map.ofEntries(
+        entry("optional", Label.LABEL_OPTIONAL),
+        entry("required", Label.LABEL_REQUIRED),
+        entry("repeated", Label.LABEL_REPEATED)
+    );
+
+    private String packageName;
+    private List<String> imports;
+    private final FileDescriptorProto.Builder builder;
+    private Stack<String> messageHierarchy = new Stack<>();
+
+    public ProtoListener()
+    {
+        this.imports = new ArrayList<>();
+        this.builder = FileDescriptorProto.newBuilder();
+    }
+
+    @Override
+    public void enterSyntax(
+        Protobuf3Parser.SyntaxContext ctx)
+    {
+        builder.setSyntax(ctx.getChild(2).getText());
+    }
+
+    @Override
+    public void enterPackageStatement(
+        Protobuf3Parser.PackageStatementContext ctx)
+    {
+        packageName = ctx.fullIdent().getText();
+        builder.setPackage(packageName);
+    }
+
+    @Override
+    public void enterImportStatement(
+        Protobuf3Parser.ImportStatementContext ctx)
+    {
+        String importStatement = ctx.strLit().getText();
+        imports.add(importStatement);
+        System.out.println("Import statements are currently not supported");
+    }
+
+    @Override
+    public void enterMessageDef(
+        Protobuf3Parser.MessageDefContext ctx)
+    {
+        DescriptorProto.Builder builder = DescriptorProto.newBuilder();
+        String name = ctx.messageName().getText();
+        builder.setName(name);
+        messageHierarchy.push(name);
+
+        for (Protobuf3Parser.MessageElementContext element : ctx.messageBody().messageElement())
+        {
+            if (element.field() != null)
+            {
+                builder.addField(processFieldElement(element.field()));
+            }
+            if (element.messageDef() != null)
+            {
+                builder.addNestedType(processNestedMessage(element.messageDef()));
+            }
+        }
+        if (messageHierarchy.size() == 1)
+        {
+            this.builder.addMessageType(builder.build());
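+            // only a top-level message is registered on the file descriptor;
+            // nested types were already attached above via processNestedMessage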
builder.clear(); + } + } + + @Override + public void exitMessageDef( + Protobuf3Parser.MessageDefContext ctx) + { + messageHierarchy.pop(); + } + + public DescriptorProtos.FileDescriptorProto build() + { + return builder.build(); + } + + private DescriptorProto processNestedMessage( + Protobuf3Parser.MessageDefContext ctx) + { + DescriptorProto.Builder builder = DescriptorProto.newBuilder(); + String name = ctx.messageName().getText(); + builder.setName(name); + + for (Protobuf3Parser.MessageElementContext element : ctx.messageBody().messageElement()) + { + if (element.field() != null) + { + builder.addField(processFieldElement(element.field())); + } + if (element.messageDef() != null) + { + builder.addNestedType(processNestedMessage(element.messageDef())); + } + } + return builder.build(); + } + + private FieldDescriptorProto processFieldElement( + Protobuf3Parser.FieldContext ctx) + { + FieldDescriptorProto.Builder builder = FieldDescriptorProto.newBuilder(); + String type = ctx.type_().getText(); + String name = ctx.fieldName().getText(); + String label = ctx.fieldLabel() != null ? ctx.fieldLabel().getText() : null; + int number = Integer.parseInt(ctx.fieldNumber().getText()); + + builder.setName(name); + builder.setNumber(number); + if (label != null) + { + builder.setLabel(LABELS.get(label)); + } + if (TYPES.containsKey(type)) + { + builder.setType(TYPES.get(type)); + } + else + { + builder.setTypeName(type); + } + return builder.build(); + } +} diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModel.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModel.java new file mode 100644 index 0000000000..a5648f4288 --- /dev/null +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModel.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import java.net.URL; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; + +public class ProtobufModel implements Model +{ + public static final String NAME = "protobuf"; + + @Override + public String name() + { + return NAME; + } + + @Override + public ModelContext supply( + EngineContext context) + { + return new ProtobufModelContext(context); + } + + @Override + public URL type() + { + return getClass().getResource("schema/protobuf.schema.patch.json"); + } +} diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelContext.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelContext.java new file mode 100644 index 0000000000..6998afb18c --- /dev/null +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelContext.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import java.util.function.LongFunction; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufModelContext implements ModelContext +{ + private final LongFunction supplyCatalog; + + public ProtobufModelContext( + EngineContext context) + { + this.supplyCatalog = context::supplyCatalog; + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return new ProtobufReadConverterHandler(ProtobufModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return new ProtobufWriteConverterHandler(ProtobufModelConfig.class.cast(config), supplyCatalog); + } +} diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapter.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpi.java similarity index 50% rename from incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapter.java rename to incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpi.java index 69f57d0f9b..4804dbef38 100644 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapter.java +++ 
b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpi.java @@ -12,34 +12,33 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.core.config; +package io.aklivity.zilla.runtime.model.protobuf.internal; -import jakarta.json.Json; -import jakarta.json.JsonValue; -import jakarta.json.bind.adapter.JsonbAdapter; +import java.net.URL; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; +import io.aklivity.zilla.runtime.common.feature.Incubating; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; -public class LongValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter +@Incubating +public final class ProtobufModelFactorySpi implements ModelFactorySpi { @Override public String type() { - return "long"; + return ProtobufModel.NAME; } - @Override - public JsonValue adaptToJson( - ValidatorConfig options) + public URL schema() { - return Json.createValue(type()); + return getClass().getResource("schema/protobuf.schema.patch.json"); } @Override - public ValidatorConfig adaptFromJson( - JsonValue object) + public Model create( + Configuration config) { - return new LongValidatorConfig(); + return new ProtobufModel(); } } diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelHandler.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelHandler.java new file mode 100644 index 0000000000..39dd41e64c --- /dev/null +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelHandler.java @@ -0,0 +1,273 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+package io.aklivity.zilla.runtime.model.protobuf.internal;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.function.LongFunction;
+
+import org.agrona.BitUtil;
+import org.agrona.DirectBuffer;
+import org.agrona.ExpandableDirectByteBuffer;
+import org.agrona.collections.Int2IntHashMap;
+import org.agrona.collections.Int2ObjectCache;
+import org.agrona.collections.Object2ObjectHashMap;
+import org.agrona.io.DirectBufferInputStream;
+import org.agrona.io.ExpandableDirectBufferOutputStream;
+import org.antlr.v4.runtime.BailErrorStrategy;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.CharStreams;
+import org.antlr.v4.runtime.CommonTokenStream;
+import org.antlr.v4.runtime.tree.ParseTreeWalker;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Descriptors.DescriptorValidationException;
+import com.google.protobuf.Descriptors.FileDescriptor;
+import com.google.protobuf.DynamicMessage;
+
+import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler;
+import io.aklivity.zilla.runtime.engine.config.CatalogedConfig;
+import io.aklivity.zilla.runtime.engine.config.SchemaConfig;
+import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig;
+import io.aklivity.zilla.runtime.model.protobuf.internal.parser.Protobuf3Lexer;
+import io.aklivity.zilla.runtime.model.protobuf.internal.parser.Protobuf3Parser;
+
+public class ProtobufModelHandler
+{
+    protected static final byte[] ZERO_INDEX = new byte[]{0x0};
+    protected static final String VIEW_JSON = "json";
+
+    private static final int JSON_FIELD_STRUCTURE_LENGTH = "\"\":\"\",".length();
+    private static final int JSON_OBJECT_CURLY_BRACES = 2;
+
+    protected final SchemaConfig catalog;
+    protected final CatalogHandler handler;
+    protected final String subject;
+    protected final String view;
+    protected final List<Integer> indexes;
+    protected final DirectBufferInputStream in;
+    protected final ExpandableDirectBufferOutputStream out;
+
+    private final Int2ObjectCache<FileDescriptor> descriptors;
+    private final Int2ObjectCache<DescriptorTree> tree;
+    private final Object2ObjectHashMap<String, DynamicMessage.Builder> builders;
+    private final FileDescriptor[] dependencies;
+    private final Int2IntHashMap paddings;
+
+    protected ProtobufModelHandler(
+        ProtobufModelConfig config,
+        LongFunction<CatalogHandler> supplyCatalog)
+    {
+        CatalogedConfig cataloged = config.cataloged.get(0);
+        this.handler = supplyCatalog.apply(cataloged.id);
+        this.catalog = cataloged.schemas.size() != 0 ? cataloged.schemas.get(0) : null;
+        this.subject = catalog != null && catalog.subject != null
+            ?
catalog.subject + : config.subject; + this.view = config.view; + this.descriptors = new Int2ObjectCache<>(1, 1024, i -> {}); + this.tree = new Int2ObjectCache<>(1, 1024, i -> {}); + this.builders = new Object2ObjectHashMap<>(); + this.in = new DirectBufferInputStream(); + this.dependencies = new FileDescriptor[0]; + this.indexes = new LinkedList<>(); + this.paddings = new Int2IntHashMap(-1); + this.out = new ExpandableDirectBufferOutputStream(new ExpandableDirectByteBuffer()); + } + + protected FileDescriptor supplyDescriptor( + int schemaId) + { + return descriptors.computeIfAbsent(schemaId, this::createDescriptors); + } + + protected DescriptorTree supplyDescriptorTree( + int schemaId) + { + return tree.computeIfAbsent(schemaId, this::createDescriptorTree); + } + + protected byte[] encodeIndexes() + { + int size = indexes.size(); + + byte[] indexes = new byte[size * 5]; + + int index = 0; + for (int i = 0; i < size; i++) + { + int entry = this.indexes.get(i); + int value = (entry << 1) ^ (entry >> 31); + while ((value & ~0x7F) != 0) + { + indexes[index++] = (byte) ((value & 0x7F) | 0x80); + value >>>= 7; + } + indexes[index++] = (byte) value; + } + + return Arrays.copyOf(indexes, index); + } + + protected int decodeIndexes( + DirectBuffer data, + int index, + int length) + { + int progress = 0; + indexes.clear(); + int encodedLength = decodeIndex(data.getByte(index)); + progress += BitUtil.SIZE_OF_BYTE; + if (encodedLength == 0) + { + indexes.add(encodedLength); + } + for (int i = 0; i < encodedLength; i++) + { + indexes.add(decodeIndex(data.getByte(index + progress))); + progress += BitUtil.SIZE_OF_BYTE; + } + return progress; + } + + protected int supplyIndexPadding( + int schemaId) + { + return paddings.computeIfAbsent(schemaId, this::calculateIndexPadding); + } + + protected int supplyJsonFormatPadding( + int schemaId) + { + return paddings.computeIfAbsent(schemaId, id -> calculateJsonFormatPadding(supplyDescriptor(id))); + } + + protected DynamicMessage.Builder supplyDynamicMessageBuilder( + Descriptors.Descriptor descriptor) + { + DynamicMessage.Builder builder; + if (builders.containsKey(descriptor.getFullName())) + { + builder = builders.get(descriptor.getFullName()); + } + else + { + builder = createDynamicMessageBuilder(descriptor); + builders.put(descriptor.getFullName(), builder); + } + return builder; + } + + private DynamicMessage.Builder createDynamicMessageBuilder( + Descriptors.Descriptor descriptor) + { + return DynamicMessage.newBuilder(descriptor); + } + + private int decodeIndex( + byte encodedByte) + { + int result = 0; + int shift = 0; + do + { + result |= (encodedByte & 0x7F) << shift; + shift += 7; + } + while ((encodedByte & 0x80) != 0); + return (result >>> 1) ^ -(result & 1); + } + + private int calculateIndexPadding( + int schemaId) + { + int padding = 0; + DescriptorTree trees = supplyDescriptorTree(schemaId); + if (trees != null && catalog.record != null) + { + DescriptorTree tree = trees.findByName(catalog.record); + if (tree != null) + { + padding = tree.indexes.size() + 1; + } + } + return padding; + } + + private int calculateJsonFormatPadding( + FileDescriptor descriptor) + { + int padding = 0; + + if (descriptor != null) + { + for (Descriptors.Descriptor message : descriptor.getMessageTypes()) + { + padding += JSON_OBJECT_CURLY_BRACES; + for (Descriptors.FieldDescriptor field : message.getFields()) + { + padding += field.getName().getBytes().length + JSON_FIELD_STRUCTURE_LENGTH; + } + } + + } + return padding; + } + + private FileDescriptor 
createDescriptors( + int schemaId) + { + FileDescriptor descriptor = null; + + String schemaText = handler.resolve(schemaId); + if (schemaText != null) + { + CharStream input = CharStreams.fromString(schemaText); + Protobuf3Lexer lexer = new Protobuf3Lexer(input); + CommonTokenStream tokens = new CommonTokenStream(lexer); + + Protobuf3Parser parser = new Protobuf3Parser(tokens); + parser.setErrorHandler(new BailErrorStrategy()); + ParseTreeWalker walker = new ParseTreeWalker(); + + ProtoListener listener = new ProtoListener(); + walker.walk(listener, parser.proto()); + + try + { + descriptor = FileDescriptor.buildFrom(listener.build(), dependencies); + } + catch (DescriptorValidationException ex) + { + ex.printStackTrace(); + } + } + return descriptor; + } + + private DescriptorTree createDescriptorTree( + int schemaId) + { + DescriptorTree tree = null; + FileDescriptor descriptor = supplyDescriptor(schemaId); + + if (descriptor != null) + { + tree = new DescriptorTree(descriptor); + } + return tree; + } +} diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufReadConverterHandler.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufReadConverterHandler.java new file mode 100644 index 0000000000..495c736f0b --- /dev/null +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufReadConverterHandler.java @@ -0,0 +1,155 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import static io.aklivity.zilla.runtime.engine.catalog.CatalogHandler.NO_SCHEMA_ID; + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.util.JsonFormat; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufReadConverterHandler extends ProtobufModelHandler implements ConverterHandler +{ + private final JsonFormat.Printer printer; + private final OutputStreamWriter output; + + public ProtobufReadConverterHandler( + ProtobufModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + this.printer = JsonFormat.printer() + .omittingInsignificantWhitespace() + .preservingProtoFieldNames() + .includingDefaultValueFields(); + this.output = new OutputStreamWriter(out); + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + int padding = 0; + if (VIEW_JSON.equals(view)) + { + int schemaId = handler.resolve(data, index, length); + + if (schemaId == NO_SCHEMA_ID) + { + schemaId = catalog.id != NO_SCHEMA_ID + ? catalog.id + : handler.resolve(subject, catalog.version); + } + padding = supplyJsonFormatPadding(schemaId); + } + return padding; + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + return handler.decode(data, index, length, next, this::decodePayload); + } + + private int decodePayload( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + if (schemaId == NO_SCHEMA_ID) + { + if (catalog.id != NO_SCHEMA_ID) + { + schemaId = catalog.id; + } + else + { + schemaId = handler.resolve(subject, catalog.version); + } + } + + int progress = decodeIndexes(data, index, length); + + return validate(schemaId, data, index + progress, length - progress, next); + } + + private int validate( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + DescriptorTree tree = supplyDescriptorTree(schemaId); + if (tree != null) + { + Descriptors.Descriptor descriptor = tree.findByIndexes(indexes); + if (descriptor != null) + { + in.wrap(data, index, length); + DynamicMessage.Builder builder = supplyDynamicMessageBuilder(descriptor); + validate: + try + { + DynamicMessage message = builder.mergeFrom(in).build(); + builder.clear(); + if (!message.getUnknownFields().asMap().isEmpty()) + { + break validate; + } + + if (VIEW_JSON.equals(view)) + { + out.wrap(out.buffer()); + printer.appendTo(message, output); + output.flush(); + valLength = out.position(); + next.accept(out.buffer(), 0, valLength); + } + else + { + next.accept(data, index, length); + valLength = length; + } + } + catch (IOException ex) + { + ex.printStackTrace(); + } + } + } + return valLength; + } +} diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufWriteConverterHandler.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufWriteConverterHandler.java new file mode 100644 index 0000000000..d711d841ac --- /dev/null 
+++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufWriteConverterHandler.java @@ -0,0 +1,187 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.io.DirectBufferInputStream; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.util.JsonFormat; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufWriteConverterHandler extends ProtobufModelHandler implements ConverterHandler +{ + private final DirectBuffer indexesRO; + private final InputStreamReader input; + private final DirectBufferInputStream in; + private final JsonFormat.Parser parser; + + public ProtobufWriteConverterHandler( + ProtobufModelConfig config, + LongFunction supplyCatalog) + { + super(config, supplyCatalog); + this.indexesRO = new UnsafeBuffer(); + this.in = new DirectBufferInputStream(); + this.input = new InputStreamReader(in); + this.parser = JsonFormat.parser(); + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + int schemaId = catalog != null && catalog.id > 0 + ? catalog.id + : handler.resolve(subject, catalog.version); + + return handler.encodePadding() + supplyIndexPadding(schemaId); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + + int schemaId = catalog != null && catalog.id > 0 + ? 
catalog.id + : handler.resolve(subject, catalog.version); + + if (VIEW_JSON.equals(view)) + { + valLength = handler.encode(schemaId, data, index, length, next, this::serializeJsonRecord); + } + else if (validate(schemaId, data, index, length)) + { + valLength = handler.encode(schemaId, data, index, length, next, this::encode); + } + return valLength; + } + + private boolean validate( + int schemaId, + DirectBuffer buffer, + int index, + int length) + { + boolean status = false; + DescriptorTree trees = supplyDescriptorTree(schemaId); + if (trees != null && catalog.record != null) + { + DescriptorTree tree = trees.findByName(catalog.record); + if (tree != null) + { + Descriptors.Descriptor descriptor = tree.descriptor; + indexes.clear(); + indexes.add(tree.indexes.size()); + indexes.addAll(tree.indexes); + in.wrap(buffer, index, length); + DynamicMessage.Builder builder = supplyDynamicMessageBuilder(descriptor); + try + { + DynamicMessage message = builder.mergeFrom(in).build(); + builder.clear(); + status = message.getUnknownFields().asMap().isEmpty(); + } + catch (IOException ex) + { + ex.printStackTrace(); + } + } + } + return status; + } + + private int encode( + int schemaId, + DirectBuffer buffer, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + if (indexes.size() == 2 && indexes.get(0) == 1 && indexes.get(1) == 0) + { + indexesRO.wrap(ZERO_INDEX); + valLength = 1; + } + else + { + indexesRO.wrap(encodeIndexes()); + valLength = indexes.size(); + } + indexes.clear(); + next.accept(indexesRO, 0, valLength); + next.accept(buffer, index, length); + return valLength + length; + } + + private int serializeJsonRecord( + int schemaId, + DirectBuffer buffer, + int index, + int length, + ValueConsumer next) + { + int valLength = -1; + DescriptorTree tree = supplyDescriptorTree(schemaId); + if (tree != null && catalog.record != null) + { + tree = tree.findByName(catalog.record); + if (tree != null) + { + Descriptors.Descriptor descriptor = tree.descriptor; + indexes.clear(); + indexes.add(tree.indexes.size()); + indexes.addAll(tree.indexes); + DynamicMessage.Builder builder = supplyDynamicMessageBuilder(descriptor); + in.wrap(buffer, index, length); + try + { + parser.merge(input, builder); + DynamicMessage message = builder.build(); + builder.clear(); + if (message.isInitialized() && message.getUnknownFields().asMap().isEmpty()) + { + out.wrap(out.buffer()); + message.writeTo(out); + valLength = encode(schemaId, out.buffer(), 0, out.position(), next); + } + } + catch (IOException ex) + { + ex.printStackTrace(); + } + } + } + return valLength; + } +} diff --git a/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapter.java b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapter.java new file mode 100644 index 0000000000..495eff2ca7 --- /dev/null +++ b/incubator/model-protobuf/src/main/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapter.java @@ -0,0 +1,114 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the
+ * License at
+ *
+ *   https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.model.protobuf.internal.config;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import jakarta.json.Json;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonArrayBuilder;
+import jakarta.json.JsonObject;
+import jakarta.json.JsonObjectBuilder;
+import jakarta.json.JsonValue;
+import jakarta.json.bind.adapter.JsonbAdapter;
+
+import io.aklivity.zilla.runtime.engine.config.CatalogedConfig;
+import io.aklivity.zilla.runtime.engine.config.ModelConfig;
+import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi;
+import io.aklivity.zilla.runtime.engine.config.SchemaConfig;
+import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter;
+import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig;
+
+public final class ProtobufModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter<ModelConfig, JsonValue>
+{
+    private static final String PROTOBUF = "protobuf";
+    private static final String MODEL_NAME = "model";
+    private static final String CATALOG_NAME = "catalog";
+    private static final String SUBJECT_NAME = "subject";
+    private static final String VIEW = "view";
+
+    private final SchemaConfigAdapter schema = new SchemaConfigAdapter();
+
+    @Override
+    public String type()
+    {
+        return PROTOBUF;
+    }
+
+    @Override
+    public JsonValue adaptToJson(
+        ModelConfig config)
+    {
+        ProtobufModelConfig protobufConfig = (ProtobufModelConfig) config;
+        JsonObjectBuilder converter = Json.createObjectBuilder();
+        converter.add(MODEL_NAME, PROTOBUF);
+
+        if (protobufConfig.view != null)
+        {
+            converter.add(VIEW, protobufConfig.view);
+        }
+
+        if (protobufConfig.cataloged != null && !protobufConfig.cataloged.isEmpty())
+        {
+            JsonObjectBuilder catalogs = Json.createObjectBuilder();
+            for (CatalogedConfig catalog : protobufConfig.cataloged)
+            {
+                JsonArrayBuilder array = Json.createArrayBuilder();
+                for (SchemaConfig schemaItem : catalog.schemas)
+                {
+                    array.add(schema.adaptToJson(schemaItem));
+                }
+                catalogs.add(catalog.name, array);
+            }
+            converter.add(CATALOG_NAME, catalogs);
+        }
+        return converter.build();
+    }
+
+    @Override
+    public ModelConfig adaptFromJson(
+        JsonValue value)
+    {
+        JsonObject object = (JsonObject) value;
+
+        assert object.containsKey(CATALOG_NAME);
+
+        JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME);
+        List<CatalogedConfig> catalogs = new LinkedList<>();
+        for (String catalogName : catalogsJson.keySet())
+        {
+            JsonArray schemasJson = catalogsJson.getJsonArray(catalogName);
+            List<SchemaConfig> schemas = new LinkedList<>();
+            for (JsonValue item : schemasJson)
+            {
+                JsonObject schemaJson = (JsonObject) item;
+                SchemaConfig schemaElement = schema.adaptFromJson(schemaJson);
+                schemas.add(schemaElement);
+            }
+            catalogs.add(new CatalogedConfig(catalogName, schemas));
+        }
+
+        String subject = object.containsKey(SUBJECT_NAME)
+            ? object.getString(SUBJECT_NAME)
+            : null;
+
+        String view = object.containsKey(VIEW)
+            ?
object.getString(VIEW) + : null; + + return new ProtobufModelConfig(catalogs, subject, view); + } +} diff --git a/incubator/model-protobuf/src/main/moditect/module-info.java b/incubator/model-protobuf/src/main/moditect/module-info.java new file mode 100644 index 0000000000..3fc4b59e58 --- /dev/null +++ b/incubator/model-protobuf/src/main/moditect/module-info.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +module io.aklivity.zilla.runtime.model.protobuf +{ + requires org.antlr.antlr4.runtime; + requires protobuf.java; + requires io.aklivity.zilla.runtime.engine; + + exports io.aklivity.zilla.runtime.model.protobuf.config; + + provides io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi + with io.aklivity.zilla.runtime.model.protobuf.internal.config.ProtobufModelConfigAdapter; + + provides io.aklivity.zilla.runtime.engine.model.ModelFactorySpi + with io.aklivity.zilla.runtime.model.protobuf.internal.ProtobufModelFactorySpi; +} diff --git a/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi b/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi new file mode 100644 index 0000000000..187d9d722b --- /dev/null +++ b/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.protobuf.internal.config.ProtobufModelConfigAdapter diff --git a/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi b/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi new file mode 100644 index 0000000000..98f696ae0b --- /dev/null +++ b/incubator/model-protobuf/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.model.protobuf.internal.ProtobufModelFactorySpi diff --git a/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpiTest.java b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpiTest.java new file mode 100644 index 0000000000..27a7f643a5 --- /dev/null +++ b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelFactorySpiTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufModelFactorySpiTest +{ + @Test + public void shouldLoadAndCreate() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("protobuf", config); + + ModelContext context = new ProtobufModelContext(mock(EngineContext.class)); + + ModelConfig modelConfig = ProtobufModelConfig.builder() + .subject("test-value") + .catalog() + .name("test0") + .schema() + .subject("subject1") + .version("latest") + .build() + .build() + .build(); + + assertThat(model, instanceOf(ProtobufModel.class)); + assertThat(context.supplyReadConverterHandler(modelConfig), instanceOf(ProtobufReadConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(modelConfig), instanceOf(ProtobufWriteConverterHandler.class)); + } +} diff --git a/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelTest.java b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelTest.java new file mode 100644 index 0000000000..746e46f8d9 --- /dev/null +++ b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/ProtobufModelTest.java @@ -0,0 +1,384 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.model.protobuf.internal; + +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import java.nio.charset.StandardCharsets; +import java.util.Properties; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.Catalog; +import io.aklivity.zilla.runtime.engine.catalog.CatalogContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogConfig; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalog; +import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufModelTest +{ + private static final String SCHEMA = "syntax = \"proto3\";" + + "package io.aklivity.examples.clients.proto;" + + "message SimpleMessage " + + "{ " + + "string content = 1;" + + "optional string date_time = 2;" + + "message DeviceMessage2 " + + "{ " + + "int32 id = 1;" + + "message DeviceMessage6 " + + "{ " + + "int32 id = 1;" + + "}" + + "}" + + "DeviceMessage2 device = 3;" + + "}" + + "message DemoMessage " + + "{ " + + "string status = 1;" + + "message DeviceMessage " + + "{ " + + "int32 id = 1;" + + "}" + + "message DeviceMessage1 " + + "{ " + + "int32 id = 1;" + + "}" + + "optional string date_time = 2;" + + "message SimpleMessage " + + "{ " + + "string content = 1;" + + "optional string date_time = 2;" + + "}" + + "}"; + private CatalogContext context; + + @Before + public void init() + { + Properties properties = new Properties(); + properties.setProperty(ENGINE_DIRECTORY.name(), "target/zilla-itests"); + Configuration config = new Configuration(properties); + Catalog catalog = new TestCatalog(config); + context = catalog.supply(mock(EngineContext.class)); + } + + @Test + public void shouldWriteValidProtobufEvent() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .record("SimpleMessage") + .build() + .build() + .build(); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufWriteConverterHandler converter = new ProtobufWriteConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity() + 1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + + assertEquals(data.capacity() + 1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldWriteValidProtobufEventNestedMessage() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = 
ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .record("DemoMessage.SimpleMessage") + .build() + .build() + .build(); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufWriteConverterHandler converter = new ProtobufWriteConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity() + 3, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldWriteValidProtobufEventIncorrectRecordName() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .record("DemoMessage.IncorrectRecord") + .build() + .build() + .build(); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufWriteConverterHandler converter = new ProtobufWriteConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + assertEquals(-1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldReadValidProtobufEvent() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufReadConverterHandler converter = new ProtobufReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x00, 0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity() - 1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + + assertEquals(data.capacity() - 1, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldReadValidProtobufEventNestedMessage() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufReadConverterHandler converter = new ProtobufReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x04, 0x02, 0x04, 0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity() - 3, converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldReadValidProtobufEventFormatJson() + { + CatalogConfig catalogConfig = 
new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufReadConverterHandler converter = new ProtobufReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0x00, 0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + data.wrap(bytes, 0, bytes.length); + + String json = + "{" + + "\"content\":\"OK\"," + + "\"date_time\":\"01012024\"" + + "}"; + + final ValueConsumer consumer = (buffer, index, length) -> + { + byte[] jsonBytes = new byte[length]; + buffer.getBytes(index, jsonBytes); + assertEquals(json, new String(jsonBytes, StandardCharsets.UTF_8)); + }; + converter.convert(data, 0, data.capacity(), consumer); + + converter.convert(data, 0, data.capacity(), consumer); + } + + @Test + public void shouldWriteValidProtobufEventFormatJson() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(1) + .schema(SCHEMA) + .build()); + + ProtobufModelConfig config = ProtobufModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .record("SimpleMessage") + .build() + .build() + .build(); + + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufWriteConverterHandler converter = new ProtobufWriteConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + String json = + "{" + + "\"content\":\"OK\"," + + "\"date_time\":\"01012024\"" + + "}"; + data.wrap(json.getBytes(), 0, json.getBytes().length); + + byte[] expectedBytes = {0x00, 0x0a, 0x02, 0x4f, 0x4b, 0x12, 0x08, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x32, 0x34}; + DirectBuffer expected = new UnsafeBuffer(); + expected.wrap(expectedBytes, 0, expectedBytes.length); + + assertEquals(expected.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + + assertEquals(expected.capacity(), converter.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } + + @Test + public void shouldVerifyJsonFormatPaddingLength() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufModelConfig config = ProtobufModelConfig.builder() + .view("json") + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + .build() + .build() + .build(); + ProtobufReadConverterHandler converter = new ProtobufReadConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + assertEquals(71, converter.padding(data, 0, data.capacity())); + } + + @Test + public void shouldVerifyIndexPaddingLength() + { + CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", + TestCatalogOptionsConfig.builder() + .id(9) + .schema(SCHEMA) + .build()); + LongFunction handler = value -> context.attach(catalogConfig); + ProtobufModelConfig config = ProtobufModelConfig.builder() + .catalog() + .name("test0") + .schema() + .strategy("topic") + .version("latest") + .subject("test-value") + 
.record("DemoMessage.SimpleMessage") + .build() + .build() + .build(); + ProtobufWriteConverterHandler converter = new ProtobufWriteConverterHandler(config, handler); + + DirectBuffer data = new UnsafeBuffer(); + + assertEquals(3, converter.padding(data, 0, data.capacity())); + + } +} diff --git a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java similarity index 65% rename from incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java rename to incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java index 4e7e123c98..9c95ad4832 100644 --- a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java +++ b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.avro.config; +package io.aklivity.zilla.runtime.model.protobuf.internal.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +26,9 @@ import org.junit.Before; import org.junit.Test; -public class AvroValidatorConfigAdapterTest +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufModelConfigAdapterTest { private Jsonb jsonb; @@ -34,17 +36,17 @@ public class AvroValidatorConfigAdapterTest public void initJson() { JsonbConfig config = new JsonbConfig() - .withAdapters(new AvroValidatorConfigAdapter()); + .withAdapters(new ProtobufModelConfigAdapter()); jsonb = JsonbBuilder.create(config); } @Test - public void shouldReadAvroValidator() + public void shouldReadAvroConverter() { // GIVEN String json = "{" + - "\"type\": \"avro\"," + + "\"model\": \"protobuf\"," + "\"catalog\":" + "{" + "\"test0\":" + @@ -65,32 +67,32 @@ public void shouldReadAvroValidator() "}"; // WHEN - AvroValidatorConfig validator = jsonb.fromJson(json, AvroValidatorConfig.class); + ProtobufModelConfig converter = jsonb.fromJson(json, ProtobufModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("avro")); - assertThat(validator.catalogs.size(), equalTo(1)); - assertThat(validator.catalogs.get(0).name, equalTo("test0")); - assertThat(validator.catalogs.get(0).schemas.get(0).strategy, equalTo("topic")); - assertThat(validator.catalogs.get(0).schemas.get(0).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(0).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(1).subject, equalTo("cat")); - assertThat(validator.catalogs.get(0).schemas.get(1).strategy, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(1).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(1).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(2).strategy, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).version, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).id, equalTo(42)); + assertThat(converter, 
diff --git a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java similarity index 65% rename from incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java rename to incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java index 4e7e123c98..9c95ad4832 100644 --- a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapterTest.java +++ b/incubator/model-protobuf/src/test/java/io/aklivity/zilla/runtime/model/protobuf/internal/config/ProtobufModelConfigAdapterTest.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.validator.avro.config; +package io.aklivity.zilla.runtime.model.protobuf.internal.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +26,9 @@ import org.junit.Before; import org.junit.Test; -public class AvroValidatorConfigAdapterTest +import io.aklivity.zilla.runtime.model.protobuf.config.ProtobufModelConfig; + +public class ProtobufModelConfigAdapterTest { private Jsonb jsonb; @@ -34,17 +36,17 @@ public class AvroValidatorConfigAdapterTest public void initJson() { JsonbConfig config = new JsonbConfig() - .withAdapters(new AvroValidatorConfigAdapter()); + .withAdapters(new ProtobufModelConfigAdapter()); jsonb = JsonbBuilder.create(config); } @Test - public void shouldReadAvroValidator() + public void shouldReadProtobufConverter() { // GIVEN String json = "{" + - "\"type\": \"avro\"," + + "\"model\": \"protobuf\"," + "\"catalog\":" + "{" + "\"test0\":" + @@ -65,32 +67,32 @@ public void shouldReadAvroValidator() "}"; // WHEN - AvroValidatorConfig validator = jsonb.fromJson(json, AvroValidatorConfig.class); + ProtobufModelConfig converter = jsonb.fromJson(json, ProtobufModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("avro")); - assertThat(validator.catalogs.size(), equalTo(1)); - assertThat(validator.catalogs.get(0).name, equalTo("test0")); - assertThat(validator.catalogs.get(0).schemas.get(0).strategy, equalTo("topic")); - assertThat(validator.catalogs.get(0).schemas.get(0).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(0).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(1).subject, equalTo("cat")); - assertThat(validator.catalogs.get(0).schemas.get(1).strategy, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(1).version, equalTo("latest")); - assertThat(validator.catalogs.get(0).schemas.get(1).id, equalTo(0)); - assertThat(validator.catalogs.get(0).schemas.get(2).strategy, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).version, nullValue()); - assertThat(validator.catalogs.get(0).schemas.get(2).id, equalTo(42)); + assertThat(converter, not(nullValue())); + assertThat(converter.model, equalTo("protobuf")); + assertThat(converter.cataloged.size(), equalTo(1)); + assertThat(converter.cataloged.get(0).name, equalTo("test0")); + assertThat(converter.cataloged.get(0).schemas.get(0).strategy, equalTo("topic")); + assertThat(converter.cataloged.get(0).schemas.get(0).version, equalTo("latest")); + assertThat(converter.cataloged.get(0).schemas.get(0).id, equalTo(0)); + assertThat(converter.cataloged.get(0).schemas.get(1).subject, equalTo("cat")); + assertThat(converter.cataloged.get(0).schemas.get(1).strategy, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(1).version, equalTo("latest")); + assertThat(converter.cataloged.get(0).schemas.get(1).id, equalTo(0)); + assertThat(converter.cataloged.get(0).schemas.get(2).strategy, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(2).version, nullValue()); + assertThat(converter.cataloged.get(0).schemas.get(2).id, equalTo(42)); } @Test - public void shouldWriteAvroValidator() + public void shouldWriteProtobufConverter() { // GIVEN String expectedJson = "{" + - "\"type\":\"avro\"," + + "\"model\":\"protobuf\"," + "\"catalog\":" + "{" + "\"test0\":" + @@ -109,7 +111,7 @@ public void shouldWriteAvroValidator() "]" + "}" + "}"; - AvroValidatorConfig validator = AvroValidatorConfig.builder() + ProtobufModelConfig converter = ProtobufModelConfig.builder() .catalog() .name("test0") .schema() @@ -127,7 +129,7 @@ public void shouldWriteAvroValidator() .build(); // WHEN - String json = jsonb.toJson(validator); + String json = jsonb.toJson(converter); // THEN assertThat(json, not(nullValue())); diff --git a/incubator/pom.xml b/incubator/pom.xml index 83e67412d4..48c1798af4 100644 --- a/incubator/pom.xml +++ b/incubator/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml @@ -21,9 +21,10 @@ catalog-inline.spec catalog-schema-registry.spec exporter-otlp.spec - validator-avro.spec - validator-core.spec - validator-json.spec + model-avro.spec + model-core.spec + model-json.spec + model-protobuf.spec binding-amqp @@ -37,9 +38,10 @@ exporter-otlp - validator-avro - validator-core - validator-json + model-avro + model-core + model-json + model-protobuf @@ -86,17 +88,22 @@ ${project.groupId} - validator-avro + model-avro ${project.version} ${project.groupId} - validator-core + model-core ${project.version} ${project.groupId} - validator-json + model-json + ${project.version} + + + ${project.groupId} + model-protobuf ${project.version} diff --git a/incubator/validator-avro.spec/src/main/scripts/io/aklivity/zilla/specs/validator/avro/schema/avro.schema.patch.json b/incubator/validator-avro.spec/src/main/scripts/io/aklivity/zilla/specs/validator/avro/schema/avro.schema.patch.json deleted file mode 100644 index 1d451f5fb2..0000000000 --- a/incubator/validator-avro.spec/src/main/scripts/io/aklivity/zilla/specs/validator/avro/schema/avro.schema.patch.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "add", - "path": "/$defs/validator/types/enum/-", - "value": "avro" - } -] diff --git a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidator.java b/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidator.java deleted file mode 100644 index 3b69dae544..0000000000 --- a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/AvroValidator.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community 
License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.avro; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; -import java.util.stream.Collectors; - -import org.agrona.DirectBuffer; -import org.agrona.collections.Long2ObjectHashMap; -import org.apache.avro.Schema; -import org.apache.avro.Schema.Parser; -import org.apache.avro.generic.GenericDatumReader; -import org.apache.avro.io.DatumReader; -import org.apache.avro.io.DecoderFactory; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; -import io.aklivity.zilla.runtime.engine.config.SchemaConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfig; - -public final class AvroValidator implements Validator -{ - private static final byte MAGIC_BYTE = 0x0; - - private final List catalogs; - private final SchemaConfig catalog; - private final Long2ObjectHashMap handlersById; - private final CatalogHandler handler; - private final DecoderFactory decoder; - private final String subject; - private DatumReader reader; - private Parser parser; - - public AvroValidator( - AvroValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) - { - this.handlersById = new Long2ObjectHashMap<>(); - this.decoder = DecoderFactory.get(); - this.catalogs = config.catalogs.stream().map(c -> - { - c.id = resolveId.applyAsLong(c.name); - handlersById.put(c.id, supplyCatalog.apply(c.id)); - return c; - }).collect(Collectors.toList()); - this.handler = handlersById.get(catalogs.get(0).id); - this.parser = new Schema.Parser(); - this.catalog = catalogs.get(0).schemas.size() != 0 ? catalogs.get(0).schemas.get(0) : null; - this.subject = config.subject; - } - - @Override - public boolean read( - DirectBuffer data, - int index, - int length) - { - boolean status = false; - byte[] payloadBytes = new byte[length]; - data.getBytes(0, payloadBytes); - ByteBuffer byteBuf = ByteBuffer.wrap(payloadBytes); - - if (byteBuf.get() == MAGIC_BYTE) - { - int schemaId = byteBuf.getInt(); - int valLength = length - 1 - 4; - byte[] valBytes = new byte[valLength]; - data.getBytes(length - valLength, valBytes); - - String schema = handler.resolve(schemaId); - - if (schema != null && validate(schema, valBytes)) - { - status = true; - } - } - return status; - } - - @Override - public boolean write( - DirectBuffer data, - int index, - int length) - { - boolean status = false; - String schema = null; - int schemaId = catalog != null ? 
catalog.id : 0; - - byte[] payloadBytes = new byte[length]; - data.getBytes(0, payloadBytes); - - if (schemaId > 0) - { - schema = handler.resolve(schemaId); - } - else if (catalog != null && "topic".equals(catalog.strategy)) - { - schemaId = handler.resolve(subject, catalog.version); - if (schemaId > 0) - { - schema = handler.resolve(schemaId); - } - } - - if (schema != null && validate(schema, payloadBytes)) - { - status = true; - } - - return status; - } - - private boolean validate( - String schema, - byte[] payloadBytes) - { - boolean status = false; - try - { - reader = new GenericDatumReader(parser.parse(schema)); - reader.read(null, decoder.binaryDecoder(payloadBytes, null)); - status = true; - } - catch (Exception e) - { - } - return status; - } -} diff --git a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapter.java b/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapter.java deleted file mode 100644 index b244bf2787..0000000000 --- a/incubator/validator-avro/src/main/java/io/aklivity/zilla/runtime/validator/avro/config/AvroValidatorConfigAdapter.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
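For orientation while reading the deleted read() path above: payloads arrive in the schema-registry wire format, one zero magic byte plus a four-byte big-endian schema id, then the Avro body. A minimal sketch of that header parse (the method name is illustrative, not a Zilla API):

```java
import java.nio.ByteBuffer;

// [0x00 magic][int32 schema id][Avro binary payload], as consumed by the
// deleted AvroValidator.read(). Returns -1 when the frame is absent.
static int readSchemaId(byte[] message)
{
    ByteBuffer buf = ByteBuffer.wrap(message);
    if (buf.remaining() < 5 || buf.get() != 0x00)
    {
        return -1;
    }
    return buf.getInt(); // e.g. {0x00, 0x00, 0x00, 0x00, 0x09, ...} -> id 9
}
```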
- */ -package io.aklivity.zilla.runtime.validator.avro.config; - -import java.util.LinkedList; -import java.util.List; - -import jakarta.json.Json; -import jakarta.json.JsonArray; -import jakarta.json.JsonArrayBuilder; -import jakarta.json.JsonObject; -import jakarta.json.JsonObjectBuilder; -import jakarta.json.JsonValue; -import jakarta.json.bind.adapter.JsonbAdapter; - -import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; -import io.aklivity.zilla.runtime.engine.config.SchemaConfig; -import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; - -public final class AvroValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter -{ - private static final String AVRO = "avro"; - private static final String TYPE_NAME = "type"; - private static final String CATALOG_NAME = "catalog"; - private static final String SUBJECT = "subject"; - - private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); - - @Override - public String type() - { - return AVRO; - } - - @Override - public JsonValue adaptToJson( - ValidatorConfig config) - { - AvroValidatorConfig validatorConfig = (AvroValidatorConfig) config; - JsonObjectBuilder validator = Json.createObjectBuilder(); - validator.add(TYPE_NAME, AVRO); - if (validatorConfig.catalogs != null && !validatorConfig.catalogs.isEmpty()) - { - JsonObjectBuilder catalogs = Json.createObjectBuilder(); - for (CatalogedConfig catalog : validatorConfig.catalogs) - { - JsonArrayBuilder array = Json.createArrayBuilder(); - for (SchemaConfig schemaItem: catalog.schemas) - { - array.add(schema.adaptToJson(schemaItem)); - } - catalogs.add(catalog.name, array); - } - validator.add(CATALOG_NAME, catalogs); - } - return validator.build(); - } - - @Override - public ValidatorConfig adaptFromJson( - JsonValue value) - { - JsonObject object = (JsonObject) value; - ValidatorConfig result = null; - if (object.containsKey(CATALOG_NAME)) - { - JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME); - List catalogs = new LinkedList<>(); - for (String catalogName: catalogsJson.keySet()) - { - JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); - List schemas = new LinkedList<>(); - for (JsonValue item : schemasJson) - { - JsonObject schemaJson = (JsonObject) item; - SchemaConfig schemaElement = schema.adaptFromJson(schemaJson); - schemas.add(schemaElement); - } - catalogs.add(new CatalogedConfig(catalogName, schemas)); - } - - String subject = object.containsKey(SUBJECT) - ? 
object.getString(SUBJECT) - : null; - - result = new AvroValidatorConfig(catalogs, subject); - } - return result; - } -} diff --git a/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi b/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi deleted file mode 100644 index aba3576a05..0000000000 --- a/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi +++ /dev/null @@ -1 +0,0 @@ -io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfigAdapter diff --git a/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi b/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi deleted file mode 100644 index 3282542a93..0000000000 --- a/incubator/validator-avro/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi +++ /dev/null @@ -1 +0,0 @@ -io.aklivity.zilla.runtime.validator.avro.AvroValidatorFactory diff --git a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactoryTest.java b/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactoryTest.java deleted file mode 100644 index a4179ba7f1..0000000000 --- a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorFactoryTest.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
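The schema lookup removed across these validators follows one rule that is easy to lose in the diff: an explicit schema id from configuration wins, otherwise the "topic" strategy resolves a subject at the configured version and then fetches that schema. Condensed below, reusing the CatalogHandler and SchemaConfig types the deleted code already depends on; only the wrapper method is illustrative.

```java
// Condensed restatement of the deleted write()/validate() lookup logic: for
// Kafka the subject is typically "<topic>-value", as in the tests.
static String resolveSchema(CatalogHandler handler, SchemaConfig catalog, String subject)
{
    int schemaId = catalog != null ? catalog.id : 0;
    if (schemaId == 0 && catalog != null && "topic".equals(catalog.strategy))
    {
        schemaId = handler.resolve(subject, catalog.version);
    }
    return schemaId > 0 ? handler.resolve(schemaId) : null;
}
```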
- */ -package io.aklivity.zilla.runtime.validator.avro; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; - -import java.util.List; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalogHandler; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfig; - -public class AvroValidatorFactoryTest -{ - @Test - public void shouldCreate() - { - // GIVEN - ValidatorConfig validator = new AvroValidatorConfig(List.of(new CatalogedConfig("test0", List.of())), "test-value"); - ToLongFunction resolveId = i -> 0L; - LongFunction supplyCatalog = i -> new TestCatalogHandler(new TestCatalogOptionsConfig("schema0")); - AvroValidatorFactory factory = new AvroValidatorFactory(); - - // WHEN - Validator avroValidator = factory.create(validator, resolveId, supplyCatalog); - - // THEN - assertThat(avroValidator, instanceOf(AvroValidator.class)); - } -} diff --git a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorTest.java b/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorTest.java deleted file mode 100644 index bd43cdbcf3..0000000000 --- a/incubator/validator-avro/src/test/java/io/aklivity/zilla/runtime/validator/avro/AvroValidatorTest.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
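The AvroValidatorTest payloads that follow decode by hand: after the five-byte header, Avro binary strings are a zigzag-varint length plus UTF-8 bytes, so 0x06 0x69 0x64 0x30 is the three-character "id0" and 0x10 followed by eight bytes is "positive", matching the two-field Event schema. The same decode through the Avro API the deleted validator relies on:

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DecoderFactory;

// Decode the Avro body (the bytes after the 5-byte wire-format header); a
// thrown exception here is exactly what flips the deleted validator to false.
static GenericRecord decodeEvent(String schemaJson, byte[] avroBody) throws Exception
{
    Schema schema = new Schema.Parser().parse(schemaJson);
    GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
    return reader.read(null, DecoderFactory.get().binaryDecoder(avroBody, null));
}
```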
- */ -package io.aklivity.zilla.runtime.validator.avro; - -import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; - -import java.util.Properties; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.agrona.DirectBuffer; -import org.agrona.concurrent.UnsafeBuffer; -import org.junit.Before; -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.EngineConfiguration; -import io.aklivity.zilla.runtime.engine.EngineContext; -import io.aklivity.zilla.runtime.engine.catalog.Catalog; -import io.aklivity.zilla.runtime.engine.catalog.CatalogContext; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.CatalogConfig; -import io.aklivity.zilla.runtime.engine.internal.LabelManager; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalog; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; -import io.aklivity.zilla.runtime.validator.avro.config.AvroValidatorConfig; - -public class AvroValidatorTest -{ - private static final String SCHEMA = "{\"fields\":[{\"name\":\"id\",\"type\":\"string\"}," + - "{\"name\":\"status\",\"type\":\"string\"}]," + - "\"name\":\"Event\",\"namespace\":\"io.aklivity.example\",\"type\":\"record\"}"; - - private final AvroValidatorConfig avroConfig = AvroValidatorConfig.builder() - .catalog() - .name("test0") - .schema() - .strategy("topic") - .version("latest") - .subject("test-value") - .build() - .build() - .build(); - - private LabelManager labels; - private ToLongFunction resolveId; - private CatalogContext context; - - @Before - public void init() - { - Properties properties = new Properties(); - properties.setProperty(ENGINE_DIRECTORY.name(), "target/zilla-itests"); - EngineConfiguration config = new EngineConfiguration(properties); - labels = new LabelManager(config.directory()); - resolveId = name -> name != null ? 
NamespacedId.id(1, labels.supplyLabelId(name)) : 0L; - Catalog catalog = new TestCatalog(config); - context = catalog.supply(mock(EngineContext.class)); - } - - @Test - public void shouldVerifyValidAvroEvent() - { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(SCHEMA)); - LongFunction handler = value -> context.attach(catalogConfig); - AvroValidator validator = new AvroValidator(avroConfig, resolveId, handler); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x69, 0x64, - 0x30, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65}; - data.wrap(bytes, 0, bytes.length); - assertTrue(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyInvalidAvroEvent() - { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(SCHEMA)); - LongFunction handler = value -> context.attach(catalogConfig); - AvroValidator validator = new AvroValidator(avroConfig, resolveId, handler); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x69, 0x64, 0x30, 0x10}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyMagicBytes() - { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(SCHEMA)); - LongFunction handler = value -> context.attach(catalogConfig); - AvroValidator validator = new AvroValidator(avroConfig, resolveId, handler); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = "Invalid Event".getBytes(); - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyInvalidSchemaId() - { - CatalogConfig catalogConfig = new CatalogConfig("test", "test0", "test", new TestCatalogOptionsConfig(SCHEMA)); - LongFunction handler = value -> context.attach(catalogConfig); - AvroValidator validator = new AvroValidator(avroConfig, resolveId, handler); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0x00, 0x00, 0x00, 0x00, 0x79, 0x06, 0x69, 0x64, 0x30, 0x10}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } -} diff --git a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/long.schema.patch.json b/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/long.schema.patch.json deleted file mode 100644 index fda2154cad..0000000000 --- a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/long.schema.patch.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "add", - "path": "/$defs/validator/types/enum/-", - "value": "long" - } -] diff --git a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/string.schema.patch.json b/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/string.schema.patch.json deleted file mode 100644 index 6cee03d49d..0000000000 --- a/incubator/validator-core.spec/src/main/scripts/io/aklivity/zilla/specs/validator/core/schema/string.schema.patch.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "add", - "path": "/$defs/validator/types/enum/-", - "value": "string" - } -] diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactory.java 
b/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactory.java deleted file mode 100644 index e0bcbe33df..0000000000 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.core; - -import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import io.aklivity.zilla.runtime.common.feature.Incubating; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; -import io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfig; - -@Incubating -public class LongValidatorFactory implements ValidatorFactorySpi -{ - @Override - public String type() - { - return "long"; - } - - @Override - public URL schema() - { - return getClass().getResource("schema/long.schema.patch.json"); - } - - @Override - public Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) - { - return new LongValidator(LongValidatorConfig.class.cast(config)); - } -} diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidator.java b/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidator.java deleted file mode 100644 index b79b298021..0000000000 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/StringValidator.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.core; - -import java.util.function.Predicate; - -import org.agrona.DirectBuffer; - -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfig; - -public final class StringValidator implements Validator -{ - private Predicate predicate; - - public StringValidator( - StringValidatorConfig config) - { - this.predicate = config.encoding.equals("utf_8") ? this::isValidUTF8 : - config.encoding.equals("utf_16") ? 
this::isValidUTF16 : - bytes -> false; - } - - @Override - public boolean read( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } - - @Override - public boolean write( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } - - private boolean validate( - DirectBuffer data, - int index, - int length) - { - byte[] payloadBytes = new byte[length]; - data.getBytes(0, payloadBytes); - return predicate.test(payloadBytes); - } - - private boolean isValidUTF8( - byte[] byteArray) - { - int i = 0; - while (i < byteArray.length) - { - int numBytes; - if ((byteArray[i] & 0b10000000) == 0b00000000) - { - numBytes = 1; - } - else if ((byteArray[i] & 0b11100000) == 0b11000000) - { - numBytes = 2; - } - else if ((byteArray[i] & 0b11110000) == 0b11100000) - { - numBytes = 3; - } - else if ((byteArray[i] & 0b11111000) == 0b11110000) - { - numBytes = 4; - } - else - { - return false; - } - - for (int j = 1; j < numBytes; j++) - { - if (i + j >= byteArray.length) - { - return false; - } - if ((byteArray[i + j] & 0b11000000) != 0b10000000) - { - return false; - } - } - i += numBytes; - } - return true; - } - - private boolean isValidUTF16( - byte[] byteArray) - { - int i = 0; - boolean status = false; - - while (i < byteArray.length) - { - if (i + 1 >= byteArray.length) - { - status = false; - break; - } - - int highByte = byteArray[i] & 0xFF; - int lowByte = byteArray[i + 1] & 0xFF; - int codeUnit = (highByte << 8) | lowByte; - - if (codeUnit >= 0xD800 && codeUnit <= 0xDBFF) - { - if (i + 3 >= byteArray.length) - { - status = false; - break; - } - int secondHighByte = byteArray[i + 2] & 0xFF; - int secondLowByte = byteArray[i + 3] & 0xFF; - int secondCodeUnit = (secondHighByte << 8) | secondLowByte; - if (secondCodeUnit < 0xDC00 || secondCodeUnit > 0xDFFF) - { - status = false; - break; - } - i += 4; - } - else if (codeUnit >= 0xDC00 && codeUnit <= 0xDFFF) - { - status = false; - break; - } - else - { - i += 2; - } - status = true; - } - return status; - } -} diff --git a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigBuilder.java b/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigBuilder.java deleted file mode 100644 index 016551f448..0000000000 --- a/incubator/validator-core/src/main/java/io/aklivity/zilla/runtime/validator/core/config/IntegerValidatorConfigBuilder.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
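A side note on the deleted StringValidator above: the same checks can lean on the JDK's strict charset decoders, which additionally reject overlong UTF-8 forms that the hand-rolled bit scanner accepts, since it only inspects lead and continuation bits. A sketch of that alternative, not how Zilla implements it:

```java
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

// newDecoder() defaults to CodingErrorAction.REPORT, so truncated sequences,
// unpaired UTF-16 surrogates, and overlong UTF-8 encodings all throw.
static boolean isValidText(byte[] bytes, Charset charset)
{
    try
    {
        charset.newDecoder().decode(ByteBuffer.wrap(bytes));
        return true;
    }
    catch (CharacterCodingException ex)
    {
        return false;
    }
}

// isValidText(new byte[] {(byte) 0xc0}, StandardCharsets.UTF_8)          -> false
// isValidText(new byte[] {(byte) 0xd8, 0x00}, StandardCharsets.UTF_16BE) -> false
```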
- */ -package io.aklivity.zilla.runtime.validator.core.config; - -import java.util.function.Function; - -import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; - -public class IntegerValidatorConfigBuilder extends ConfigBuilder> -{ - private final Function mapper; - - IntegerValidatorConfigBuilder( - Function mapper) - { - this.mapper = mapper; - } - - @Override - @SuppressWarnings("unchecked") - protected Class> thisType() - { - return (Class>) getClass(); - } - - @Override - public T build() - { - return mapper.apply(new IntegerValidatorConfig()); - } -} diff --git a/incubator/validator-core/src/main/moditect/module-info.java b/incubator/validator-core/src/main/moditect/module-info.java deleted file mode 100644 index e114502215..0000000000 --- a/incubator/validator-core/src/main/moditect/module-info.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -module io.aklivity.zilla.runtime.validator.core -{ - requires io.aklivity.zilla.runtime.engine; - - exports io.aklivity.zilla.runtime.validator.core.config; - - provides io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi - with io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfigAdapter, - io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfigAdapter, - io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfigAdapter; - - provides io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi - with io.aklivity.zilla.runtime.validator.core.StringValidatorFactory, - io.aklivity.zilla.runtime.validator.core.IntegerValidatorFactory, - io.aklivity.zilla.runtime.validator.core.LongValidatorFactory; -} diff --git a/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi b/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi deleted file mode 100644 index fbac878d8b..0000000000 --- a/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi +++ /dev/null @@ -1,3 +0,0 @@ -io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfigAdapter -io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfigAdapter -io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfigAdapter diff --git a/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi b/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi deleted file mode 100644 index d8637946ac..0000000000 --- a/incubator/validator-core/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi +++ /dev/null @@ -1,3 +0,0 @@ -io.aklivity.zilla.runtime.validator.core.IntegerValidatorFactory -io.aklivity.zilla.runtime.validator.core.LongValidatorFactory 
-io.aklivity.zilla.runtime.validator.core.StringValidatorFactory diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactoryTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactoryTest.java deleted file mode 100644 index cc1c02f163..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/IntegerValidatorFactoryTest.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.core; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.mock; - -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.IntegerValidatorConfig; - -public class IntegerValidatorFactoryTest -{ - @Test - @SuppressWarnings("unchecked") - public void shouldCreate() - { - // GIVEN - ValidatorConfig validator = new IntegerValidatorConfig(); - ToLongFunction resolveId = mock(ToLongFunction.class); - LongFunction supplyCatalog = mock(LongFunction.class); - IntegerValidatorFactory factory = new IntegerValidatorFactory(); - - // WHEN - Validator integerValidator = factory.create(validator, resolveId, supplyCatalog); - - // THEN - assertThat(integerValidator, instanceOf(IntegerValidator.class)); - } -} diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactoryTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactoryTest.java deleted file mode 100644 index e45afe1893..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorFactoryTest.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
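The module-info.java and META-INF/services entries deleted here are two registrations of the same SPI, so both module-path and class-path users can discover the factories. The engine's actual lookup code is not part of this diff; roughly, it amounts to a ServiceLoader scan like the illustrative sketch below.

```java
import java.util.ServiceLoader;

// Illustrative ServiceLoader lookup against the ValidatorFactorySpi contract
// shown in this diff, where type() names the validator ("long", "string", ...).
static ValidatorFactorySpi findFactory(String type)
{
    for (ValidatorFactorySpi spi : ServiceLoader.load(ValidatorFactorySpi.class))
    {
        if (type.equals(spi.type()))
        {
            return spi;
        }
    }
    return null;
}
```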
- */ -package io.aklivity.zilla.runtime.validator.core; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.mock; - -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfig; - -public class LongValidatorFactoryTest -{ - @Test - @SuppressWarnings("unchecked") - public void shouldCreate() - { - // GIVEN - ValidatorConfig validator = new LongValidatorConfig(); - ToLongFunction resolveId = mock(ToLongFunction.class); - LongFunction supplyCatalog = mock(LongFunction.class); - LongValidatorFactory factory = new LongValidatorFactory(); - - // WHEN - Validator longValidator = factory.create(validator, resolveId, supplyCatalog); - - // THEN - assertThat(longValidator, instanceOf(LongValidator.class)); - } -} diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorTest.java deleted file mode 100644 index b1b8d9a926..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/LongValidatorTest.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package io.aklivity.zilla.runtime.validator.core; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import org.agrona.DirectBuffer; -import org.agrona.concurrent.UnsafeBuffer; -import org.junit.Test; - -import io.aklivity.zilla.runtime.validator.core.config.LongValidatorConfig; - -public class LongValidatorTest -{ - private final LongValidatorConfig config = new LongValidatorConfig(); - private final LongValidator validator = new LongValidator(config); - - @Test - public void shouldVerifyValidLong() - { - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0, 0, 0, 0, 0, 0, 0, 42}; - data.wrap(bytes, 0, bytes.length); - assertTrue(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyInvalidLong() - { - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0, 0, 0, 42}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.write(data, 0, data.capacity())); - } -} diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactoryTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactoryTest.java deleted file mode 100644 index 51ea3c2f3a..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorFactoryTest.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package io.aklivity.zilla.runtime.validator.core; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.mock; - -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfig; - -public class StringValidatorFactoryTest -{ - @Test - @SuppressWarnings("unchecked") - public void shouldCreate() - { - // GIVEN - ValidatorConfig validator = new StringValidatorConfig("utf_8"); - ToLongFunction resolveId = mock(ToLongFunction.class); - LongFunction supplyCatalog = mock(LongFunction.class); - StringValidatorFactory factory = new StringValidatorFactory(); - - // WHEN - Validator stringValidator = factory.create(validator, resolveId, supplyCatalog); - - // THEN - assertThat(stringValidator, instanceOf(StringValidator.class)); - } -} diff --git a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorTest.java deleted file mode 100644 index 1141d6ea16..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/StringValidatorTest.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
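Context for the StringValidatorTest surrogate cases below: UTF-16 encodes code points above U+FFFF as a high surrogate (0xD800-0xDBFF) followed by a low surrogate (0xDC00-0xDFFF), so the lone high surrogate {0xD8, 0x00} must fail while a complete pair passes. For example:

```java
import java.nio.charset.StandardCharsets;

// U+1F600 encodes as the surrogate pair D83D DE00; in UTF-16BE that is the
// 4-byte sequence D8 3D DE 00, which the deleted scanner steps over as a unit.
public static void main(String[] args)
{
    for (byte b : "\uD83D\uDE00".getBytes(StandardCharsets.UTF_16BE))
    {
        System.out.printf("%02X ", b); // prints: D8 3D DE 00
    }
}
```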
- */ -package io.aklivity.zilla.runtime.validator.core; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.nio.charset.StandardCharsets; - -import org.agrona.DirectBuffer; -import org.agrona.concurrent.UnsafeBuffer; -import org.junit.Test; - -import io.aklivity.zilla.runtime.validator.core.config.StringValidatorConfig; - -public class StringValidatorTest -{ - @Test - public void shouldVerifyValidUTF8() - { - StringValidatorConfig config = new StringValidatorConfig("utf_8"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = "Valid String".getBytes(); - data.wrap(bytes, 0, bytes.length); - assertTrue(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyInvalidUTF8() - { - StringValidatorConfig config = new StringValidatorConfig("utf_8"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {(byte) 0xc0}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyValidUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = "Valid String".getBytes(StandardCharsets.UTF_16); - data.wrap(bytes, 0, bytes.length); - - assertTrue(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyIncompleteUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0x48}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyIncompleteSurrogatePairUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {(byte) 0xD8, (byte) 0x00}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyInvalidSecondSurrogateUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {(byte) 0xDC, (byte) 0x01}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyUnexpectedSecondSurrogateUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {(byte) 0xDC, (byte) 0x80}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.read(data, 0, data.capacity())); - } - - @Test - public void shouldVerifyValidMixedUTF16() - { - StringValidatorConfig config = new StringValidatorConfig("utf_16"); - StringValidator validator = new StringValidator(config); - - DirectBuffer data = new UnsafeBuffer(); - - byte[] bytes = {0, 72, 0, 101, 0, 108, 0, 108, 0, 111, 65, 66, 67}; - data.wrap(bytes, 0, bytes.length); - assertFalse(validator.write(data, 0, data.capacity())); - } -} diff --git 
a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapterTest.java b/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapterTest.java deleted file mode 100644 index 3d8e140c36..0000000000 --- a/incubator/validator-core/src/test/java/io/aklivity/zilla/runtime/validator/core/config/LongValidatorConfigAdapterTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.core.config; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; - -import jakarta.json.bind.Jsonb; -import jakarta.json.bind.JsonbBuilder; -import jakarta.json.bind.JsonbConfig; - -import org.junit.Before; -import org.junit.Test; - -public class LongValidatorConfigAdapterTest -{ - private Jsonb jsonb; - - @Before - public void initJson() - { - JsonbConfig config = new JsonbConfig() - .withAdapters(new LongValidatorConfigAdapter()); - jsonb = JsonbBuilder.create(config); - } - - @Test - public void shouldReadLongValidator() - { - // GIVEN - String json = - "{" + - "\"type\":\"long\"" + - "}"; - - // WHEN - LongValidatorConfig validator = jsonb.fromJson(json, LongValidatorConfig.class); - - // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("long")); - } - - @Test - public void shouldWriteLongValidator() - { - // GIVEN - String expectedJson = "\"long\""; - LongValidatorConfig validator = LongValidatorConfig.builder().build(); - - // WHEN - String json = jsonb.toJson(validator); - - // THEN - assertThat(json, not(nullValue())); - assertThat(json, equalTo(expectedJson)); - } -} diff --git a/incubator/validator-json.spec/src/main/scripts/io/aklivity/zilla/specs/validator/json/schema/json.schema.patch.json b/incubator/validator-json.spec/src/main/scripts/io/aklivity/zilla/specs/validator/json/schema/json.schema.patch.json deleted file mode 100644 index 080c669aeb..0000000000 --- a/incubator/validator-json.spec/src/main/scripts/io/aklivity/zilla/specs/validator/json/schema/json.schema.patch.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "add", - "path": "/$defs/validator/types/enum/-", - "value": "json" - } -] diff --git a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidator.java b/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidator.java deleted file mode 100644 index 1e7ddeecd6..0000000000 --- a/incubator/validator-json/src/main/java/io/aklivity/zilla/runtime/validator/json/JsonValidator.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. 
You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package io.aklivity.zilla.runtime.validator.json; - -import java.io.ByteArrayInputStream; -import java.io.InputStream; -import java.io.StringReader; -import java.util.List; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; -import java.util.stream.Collectors; - -import jakarta.json.spi.JsonProvider; -import jakarta.json.stream.JsonParser; -import jakarta.json.stream.JsonParserFactory; - -import org.agrona.DirectBuffer; -import org.agrona.collections.Long2ObjectHashMap; -import org.leadpony.justify.api.JsonSchema; -import org.leadpony.justify.api.JsonSchemaReader; -import org.leadpony.justify.api.JsonValidationService; -import org.leadpony.justify.api.ProblemHandler; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; -import io.aklivity.zilla.runtime.engine.config.SchemaConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; - -public class JsonValidator implements Validator -{ - private final JsonProvider schemaProvider; - private final Long2ObjectHashMap handlersById; - private final JsonValidationService service; - private final JsonParserFactory factory; - private final List catalogs; - private final SchemaConfig catalog; - private final CatalogHandler handler; - - public JsonValidator( - JsonValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) - { - this.handlersById = new Long2ObjectHashMap<>(); - this.schemaProvider = JsonProvider.provider(); - this.service = JsonValidationService.newInstance(); - this.factory = schemaProvider.createParserFactory(null); - this.catalogs = config.catalogs.stream().map(c -> - { - c.id = resolveId.applyAsLong(c.name); - handlersById.put(c.id, supplyCatalog.apply(c.id)); - return c; - }).collect(Collectors.toList()); - this.catalog = catalogs.get(0).schemas.size() != 0 ? catalogs.get(0).schemas.get(0) : null; - this.handler = handlersById.get(catalogs.get(0).id); - } - - @Override - public boolean read( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } - - @Override - public boolean write( - DirectBuffer data, - int index, - int length) - { - return validate(data, index, length); - } - - private boolean validate( - DirectBuffer data, - int index, - int length) - { - String schema = null; - int schemaId = catalog != null ? 
catalog.id : 0; - - byte[] payloadBytes = new byte[length]; - data.getBytes(0, payloadBytes); - - if (schemaId > 0) - { - schema = handler.resolve(schemaId); - } - else if (catalog != null) - { - schemaId = handler.resolve(catalog.subject, catalog.version); - if (schemaId != 0) - { - schema = handler.resolve(schemaId); - } - } - - return schema != null && validate(schema, payloadBytes); - } - - private boolean validate( - String schema, - byte[] payloadBytes) - { - boolean status = false; - try - { - JsonParser schemaParser = factory.createParser(new StringReader(schema)); - JsonSchemaReader reader = service.createSchemaReader(schemaParser); - JsonSchema jsonSchema = reader.read(); - JsonProvider provider = service.createJsonProvider(jsonSchema, parser -> ProblemHandler.throwing()); - InputStream input = new ByteArrayInputStream(payloadBytes); - provider.createReader(input).readValue(); - status = true; - } - catch (Exception e) - { - } - return status; - } -} diff --git a/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi b/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi deleted file mode 100644 index 1b107098bc..0000000000 --- a/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi +++ /dev/null @@ -1 +0,0 @@ -io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfigAdapter diff --git a/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi b/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi deleted file mode 100644 index bf8cca9996..0000000000 --- a/incubator/validator-json/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi +++ /dev/null @@ -1 +0,0 @@ -io.aklivity.zilla.runtime.validator.json.JsonValidatorFactory diff --git a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactoryTest.java b/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactoryTest.java deleted file mode 100644 index 481e3295a3..0000000000 --- a/incubator/validator-json/src/test/java/io/aklivity/zilla/runtime/validator/json/JsonValidatorFactoryTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc - * - * Licensed under the Aklivity Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * https://www.aklivity.io/aklivity-community-license/ - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
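
The JsonValidator removed above delegated schema checking to the Leadpony Justify library, installing ProblemHandler.throwing() and treating any resulting exception as an invalid payload. A self-contained sketch of that pattern against the Justify API, with the schema text and payload bytes assumed as inputs rather than resolved from a catalog:

    import java.io.ByteArrayInputStream;
    import java.io.StringReader;

    import jakarta.json.JsonReader;

    import org.leadpony.justify.api.JsonSchema;
    import org.leadpony.justify.api.JsonValidationService;
    import org.leadpony.justify.api.ProblemHandler;

    static boolean validate(
        String schema,
        byte[] payloadBytes)
    {
        JsonValidationService service = JsonValidationService.newInstance();
        JsonSchema jsonSchema = service.readSchema(new StringReader(schema));
        try (JsonReader reader = service.createReader(
            new ByteArrayInputStream(payloadBytes), jsonSchema, ProblemHandler.throwing()))
        {
            reader.readValue();   // throws on malformed JSON or schema violation
            return true;
        }
        catch (RuntimeException ex)
        {
            return false;         // mirrors the removed validator's catch-all
        }
    }
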
- */ -package io.aklivity.zilla.runtime.validator.json; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; - -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.TestCatalogHandler; -import io.aklivity.zilla.runtime.engine.test.internal.catalog.config.TestCatalogOptionsConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.validator.json.config.JsonValidatorConfig; - -public class JsonValidatorFactoryTest -{ - @Test - public void shouldCreate() - { - // GIVEN - ValidatorConfig validator = JsonValidatorConfig.builder() - .catalog() - .name("test0") - .build() - .build(); - ToLongFunction resolveId = i -> 0L; - LongFunction supplyCatalog = i -> new TestCatalogHandler(new TestCatalogOptionsConfig("schema0")); - JsonValidatorFactory factory = new JsonValidatorFactory(); - - // WHEN - Validator jsonValidator = factory.create(validator, resolveId, supplyCatalog); - - // THEN - assertThat(jsonValidator, instanceOf(JsonValidator.class)); - } -} diff --git a/manager/pom.xml b/manager/pom.xml index 1e9b507a9a..8a45e90412 100644 --- a/manager/pom.xml +++ b/manager/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/pom.xml b/pom.xml index 692c732ac7..be0ce6e786 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ 4.0.0 io.aklivity.zilla zilla - 0.9.66 + 0.9.67 pom zilla https://github.com/aklivity/zilla diff --git a/runtime/binding-echo/pom.xml b/runtime/binding-echo/pom.xml index 1fa806d689..6bea1de329 100644 --- a/runtime/binding-echo/pom.xml +++ b/runtime/binding-echo/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-fan/pom.xml b/runtime/binding-fan/pom.xml index ad99ab845a..aa65cfd9f4 100644 --- a/runtime/binding-fan/pom.xml +++ b/runtime/binding-fan/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-filesystem/pom.xml b/runtime/binding-filesystem/pom.xml index 793d857a93..68b55812fc 100644 --- a/runtime/binding-filesystem/pom.xml +++ b/runtime/binding-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-grpc-kafka/pom.xml b/runtime/binding-grpc-kafka/pom.xml index d93d7a15de..020f2cded1 100644 --- a/runtime/binding-grpc-kafka/pom.xml +++ b/runtime/binding-grpc-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-grpc/pom.xml b/runtime/binding-grpc/pom.xml index 15144e2a2f..d772cb166a 100644 --- a/runtime/binding-grpc/pom.xml +++ b/runtime/binding-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java index 323417f06a..164fd795e7 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java @@ -16,6 +16,7 @@ import static 
io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcType.BASE64; import static io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcType.TEXT; +import static io.aklivity.zilla.runtime.engine.catalog.CatalogHandler.NO_SCHEMA_ID; import static java.util.Arrays.asList; import static java.util.stream.Collectors.toList; @@ -28,15 +29,19 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.function.LongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Stream; import org.agrona.AsciiSequenceView; import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; +import org.agrona.collections.ObjectHashSet; import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMethodConfig; import io.aklivity.zilla.runtime.binding.grpc.config.GrpcOptionsConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcProtobufConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.HttpHeaderFW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String16FW; @@ -44,8 +49,11 @@ import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcMetadataFW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcType; import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.HttpBeginExFW; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; public final class GrpcBindingConfig { @@ -56,7 +64,6 @@ public final class GrpcBindingConfig private static final byte[] HEADER_BIN_SUFFIX = new byte[4]; private static final byte[] GRPC_PREFIX = "grpc-".getBytes(); private static final byte[] BIN_SUFFIX = "-bin".getBytes(); - private final HttpGrpcHeaderHelper helper; public final long id; public final String name; @@ -64,20 +71,34 @@ public final class GrpcBindingConfig public final GrpcOptionsConfig options; public final List routes; + private final GrpcProtobufParser parser; + private final HttpGrpcHeaderHelper helper; + private final Set catalogs; public GrpcBindingConfig( BindingConfig binding, - MutableDirectBuffer metadataBuffer) + MutableDirectBuffer metadataBuffer, + LongFunction supplyCatalog) { this.id = binding.id; this.name = binding.name; this.kind = binding.kind; this.options = GrpcOptionsConfig.class.cast(binding.options); this.routes = binding.routes.stream().map(GrpcRouteConfig::new).collect(toList()); + this.parser = new GrpcProtobufParser(); this.helper = new HttpGrpcHeaderHelper(metadataBuffer); + Set catalogs = new ObjectHashSet<>(); + for (CatalogedConfig catalog : binding.catalogs) + { + CatalogHandler handler = supplyCatalog.apply(catalog.id); + for (SchemaConfig schema : catalog.schemas) + { + catalogs.add(new GrpcCatalogSchema(handler, schema.subject, schema.version)); + } + } + this.catalogs = catalogs; } - public GrpcRouteConfig resolve( long authorization, CharSequence service, @@ -107,13 +128,12 @@ public GrpcMethodResult resolveMethod( final CharSequence serviceName = serviceNameHeader != null ? 
serviceNameHeader : matcher.group(SERVICE_NAME); final String methodName = matcher.group(METHOD); - final GrpcMethodConfig method = options.protobufs.stream() - .map(p -> p.services.stream().filter(s -> s.service.equals(serviceName)).findFirst().orElse(null)) - .filter(Objects::nonNull) - .map(s -> s.methods.stream().filter(m -> m.method.equals(methodName)).findFirst().orElse(null)) - .filter(Objects::nonNull) - .findFirst() - .orElse(null); + GrpcMethodConfig method = resolveMethod(catalogs, serviceName, methodName); + + if (method == null && options != null) + { + method = resolveMethod(options.protobufs, serviceName, methodName); + } if (method != null) { @@ -133,6 +153,36 @@ public GrpcMethodResult resolveMethod( return methodResolver; } + private GrpcMethodConfig resolveMethod( + Set catalogs, + CharSequence serviceName, + String methodName) + { + return resolveMethod(catalogs.stream().map(GrpcCatalogSchema::resolveProtobuf), serviceName, methodName); + } + + private GrpcMethodConfig resolveMethod( + List protobufs, + CharSequence serviceName, + String methodName) + { + return resolveMethod(protobufs.stream(), serviceName, methodName); + } + + private GrpcMethodConfig resolveMethod( + Stream protobufs, + CharSequence serviceName, + String methodName) + { + return protobufs + .map(p -> p.services.stream().filter(s -> s.service.equals(serviceName)).findFirst().orElse(null)) + .filter(Objects::nonNull) + .map(s -> s.methods.stream().filter(m -> m.method.equals(methodName)).findFirst().orElse(null)) + .filter(Objects::nonNull) + .findFirst() + .orElse(null); + } + private static final class HttpGrpcHeaderHelper { private static final Pattern PERIOD_PATTERN = Pattern.compile("([0-9]+)([HMSmun])"); @@ -187,7 +237,6 @@ private static final class HttpGrpcHeaderHelper public String16FW authority; public String16FW te; - HttpGrpcHeaderHelper( MutableDirectBuffer metadataBuffer) { @@ -350,4 +399,39 @@ private long parsePeriod( return milliseconds; } } + + final class GrpcCatalogSchema + { + final CatalogHandler handler; + final String subject; + final String version; + + GrpcProtobufConfig protobuf; + + int schemaId = NO_SCHEMA_ID; + + GrpcCatalogSchema( + CatalogHandler handler, + String subject, + String version) + { + this.handler = handler; + this.subject = subject; + this.version = version; + } + + private GrpcProtobufConfig resolveProtobuf() + { + final int newSchemaId = handler.resolve(subject, version); + + if (schemaId != newSchemaId) + { + schemaId = newSchemaId; + String schema = handler.resolve(schemaId); + protobuf = parser.parse(null, schema); + } + + return protobuf; + } + } } diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java index c2a55f352c..41d2d9131b 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java @@ -17,7 +17,6 @@ import static java.util.stream.Collectors.toList; import java.util.List; -import java.util.Set; import java.util.function.Function; import jakarta.json.Json; @@ -29,19 +28,9 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -import org.agrona.collections.ObjectHashSet; -import org.antlr.v4.runtime.BailErrorStrategy; -import 
org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CharStreams; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.tree.ParseTreeWalker; - import io.aklivity.zilla.runtime.binding.grpc.config.GrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.grpc.config.GrpcProtobufConfig; -import io.aklivity.zilla.runtime.binding.grpc.config.GrpcServiceConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.GrpcBinding; -import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Lexer; -import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Parser; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; @@ -49,6 +38,9 @@ public final class GrpcOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter { private static final String SERVICES_NAME = "services"; + + private final GrpcProtobufParser parser = new GrpcProtobufParser(); + private Function readURL; @Override @@ -111,18 +103,8 @@ private GrpcProtobufConfig asProtobuf( JsonValue value) { final String location = ((JsonString) value).getString(); - final String protoService = readURL.apply(location); - CharStream input = CharStreams.fromString(protoService); - Protobuf3Lexer lexer = new Protobuf3Lexer(input); - CommonTokenStream tokens = new CommonTokenStream(lexer); - - Protobuf3Parser parser = new Protobuf3Parser(tokens); - parser.setErrorHandler(new BailErrorStrategy()); - ParseTreeWalker walker = new ParseTreeWalker(); - Set services = new ObjectHashSet<>(); - GrpcServiceDefinitionListener listener = new GrpcServiceDefinitionListener(services); - walker.walk(listener, parser.proto()); - - return new GrpcProtobufConfig(location, services); + final String protobuf = readURL.apply(location); + + return parser.parse(location, protobuf); } } diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufParser.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufParser.java new file mode 100644 index 0000000000..08d2681252 --- /dev/null +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufParser.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
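
Together with GrpcCatalogSchema.resolveProtobuf in the GrpcBindingConfig hunk further up, this delegation routes both statically configured protobufs and catalog-fetched schemas through one parsing path, and the catalog path re-parses only when the resolved schema id changes. A sketch of that cache-on-id-change pattern in isolation, with handler, subject, version, and parser as the assumed collaborators:

    private GrpcProtobufConfig protobuf;    // last parsed service definition
    private int schemaId = NO_SCHEMA_ID;    // schema id it was parsed from

    private GrpcProtobufConfig resolveProtobuf()
    {
        final int newSchemaId = handler.resolve(subject, version);
        if (schemaId != newSchemaId)
        {
            schemaId = newSchemaId;
            String schema = handler.resolve(schemaId);  // fetch schema text by id
            protobuf = parser.parse(null, schema);      // catalog schemas carry no location
        }
        return protobuf;    // steady state: one catalog lookup, no re-parse
    }
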
+ */ +package io.aklivity.zilla.runtime.binding.grpc.internal.config; + +import java.util.Set; + +import org.agrona.collections.ObjectHashSet; +import org.antlr.v4.runtime.BailErrorStrategy; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CharStreams; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.tree.ParseTreeWalker; + +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcProtobufConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcServiceConfig; +import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Lexer; +import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Parser; + +public final class GrpcProtobufParser +{ + private final ParseTreeWalker walker; + private final BailErrorStrategy errorStrategy; + private final Protobuf3Lexer lexer; + private CommonTokenStream tokens; + private final Protobuf3Parser parser; + + public GrpcProtobufParser() + { + this.walker = new ParseTreeWalker(); + this.errorStrategy = new BailErrorStrategy(); + this.lexer = new Protobuf3Lexer(null); + this.parser = new Protobuf3Parser(null); + this.tokens = new CommonTokenStream(lexer); + parser.setErrorHandler(errorStrategy); + } + + public GrpcProtobufConfig parse( + String location, + String schema) + { + CharStream input = CharStreams.fromString(schema); + lexer.reset(); + lexer.setInputStream(input); + + tokens.setTokenSource(lexer); + parser.setTokenStream(tokens); + + Set services = new ObjectHashSet<>(); + GrpcServiceDefinitionListener listener = new GrpcServiceDefinitionListener(services); + walker.walk(listener, parser.proto()); + + return new GrpcProtobufConfig(location, services); + } +} diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java index d3c30530d2..bf070075bd 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java @@ -14,6 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.grpc.internal.stream; +import java.util.function.LongFunction; import java.util.function.LongUnaryOperator; import org.agrona.DirectBuffer; @@ -50,6 +51,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; public class GrpcClientFactory implements GrpcStreamFactory @@ -114,6 +116,7 @@ public class GrpcClientFactory implements GrpcStreamFactory private final MutableDirectBuffer metadataBuffer; private final MutableDirectBuffer extBuffer; private final BindingHandler streamFactory; + private final LongFunction supplyCatalog; private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final int httpTypeId; @@ -130,6 +133,7 @@ public GrpcClientFactory( this.metadataBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.extBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.streamFactory = context.streamFactory(); + this.supplyCatalog = context::supplyCatalog; this.supplyInitialId = context::supplyInitialId; 
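
GrpcProtobufParser above allocates the ANTLR lexer, token stream, and parser once and rewires them on every parse call rather than rebuilding them per schema, which assumes single-threaded use, consistent with each binding attaching on one engine worker. A usage sketch with hypothetical proto3 sources:

    GrpcProtobufParser parser = new GrpcProtobufParser();

    // First parse: a statically configured protobuf with a known location.
    GrpcProtobufConfig configured = parser.parse("echo.proto",
        "syntax = \"proto3\";\n" +
        "service EchoService { rpc EchoUnary (Msg) returns (Msg); }\n" +
        "message Msg { string payload = 1; }\n");

    // Second parse on the same instance: the lexer is reset and its input
    // stream swapped, so no new parser machinery is allocated.
    GrpcProtobufConfig fetched = parser.parse(null,    // no location for catalog schemas
        "syntax = \"proto3\";\n" +
        "service RouteGuide { rpc GetFeature (Point) returns (Point); }\n" +
        "message Point { int32 latitude = 1; int32 longitude = 2; }\n");
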
this.supplyReplyId = context::supplyReplyId; this.httpTypeId = context.supplyTypeId(HTTP_TYPE_NAME); @@ -159,7 +163,7 @@ public int routedTypeId() public void attach( BindingConfig binding) { - GrpcBindingConfig grpcBinding = new GrpcBindingConfig(binding, metadataBuffer); + GrpcBindingConfig grpcBinding = new GrpcBindingConfig(binding, metadataBuffer, supplyCatalog); bindings.put(binding.id, grpcBinding); } diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java index cd44246321..475d7f85ea 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java @@ -23,6 +23,7 @@ import static java.time.Instant.now; import java.util.function.Consumer; +import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; @@ -62,6 +63,7 @@ import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; @@ -134,6 +136,7 @@ public final class GrpcServerFactory implements GrpcStreamFactory private final BufferPool bufferPool; private final Signaler signaler; private final BindingHandler streamFactory; + private final LongFunction supplyCatalog; private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final LongSupplier supplyTraceId; @@ -235,6 +238,7 @@ public GrpcServerFactory( this.bufferPool = context.bufferPool(); this.signaler = context.signaler(); this.streamFactory = context.streamFactory(); + this.supplyCatalog = context::supplyCatalog; this.supplyInitialId = context::supplyInitialId; this.supplyReplyId = context::supplyReplyId; this.supplyTraceId = context::supplyTraceId; @@ -259,7 +263,7 @@ public int routedTypeId() public void attach( BindingConfig binding) { - GrpcBindingConfig grpcBinding = new GrpcBindingConfig(binding, metadataBuffer); + GrpcBindingConfig grpcBinding = new GrpcBindingConfig(binding, metadataBuffer, supplyCatalog); bindings.put(binding.id, grpcBinding); } diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java index e622a07d75..f8d3938d3a 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java @@ -68,6 +68,17 @@ public void shouldEstablishWithBinaryMetadata() throws Exception k3po.finish(); } + @Test + @Configuration("server.when.catalog.yaml") + @Specification({ + "${net}/message.exchange/client", + "${app}/message.exchange/server" + }) + public void shouldEstablishUnaryRpcFromCatalogSchema() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.when.yaml") @Specification({ diff --git 
a/runtime/binding-http-filesystem/pom.xml b/runtime/binding-http-filesystem/pom.xml index 4fea1d660c..555edae6b8 100644 --- a/runtime/binding-http-filesystem/pom.xml +++ b/runtime/binding-http-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-http-kafka/pom.xml b/runtime/binding-http-kafka/pom.xml index 3fe004ec7b..5e8496ae5b 100644 --- a/runtime/binding-http-kafka/pom.xml +++ b/runtime/binding-http-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-http/pom.xml b/runtime/binding-http/pom.xml index dfd565effd..30054a8f46 100644 --- a/runtime/binding-http/pom.xml +++ b/runtime/binding-http/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java index 9ca894af79..40f0152347 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java @@ -15,10 +15,15 @@ */ package io.aklivity.zilla.runtime.binding.http.config; +import static java.util.Collections.emptyList; + import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.SortedSet; import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; @@ -50,6 +55,37 @@ public static HttpOptionsConfigBuilder builder( HttpAuthorizationConfig authorization, List requests) { + super(requests != null && !requests.isEmpty() + ? requests.stream() + .flatMap(request -> Stream.concat( + Stream.of(request.content), + Stream.concat( + request.headers != null + ? request.headers.stream().flatMap(header -> Stream.of(header != null ? header.model : null)) + : Stream.empty(), + Stream.concat( + request.pathParams != null + ? request.pathParams.stream().flatMap(param -> Stream.of(param != null ? param.model : null)) + : Stream.empty(), + Stream.concat( + request.queryParams != null + ? request.queryParams.stream().flatMap(param -> Stream.of(param != null ? param.model : null)) + : Stream.empty(), + Stream.concat(request.responses != null + ? request.responses.stream().flatMap(param -> Stream.of(param != null + ? param.content + : null)) + : Stream.empty(), request.responses != null + ? request.responses.stream() + .flatMap(response -> response.headers != null + ? response.headers.stream() + .flatMap(param -> Stream.of(param != null ? 
param.model : null)) + : Stream.empty()) + : Stream.empty()) + )))).filter(Objects::nonNull)) + .collect(Collectors.toList()) + : emptyList()); + this.versions = versions; this.overrides = overrides; this.access = access; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfig.java index 498e14fe26..0cff9c9370 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfig.java @@ -17,19 +17,19 @@ import static java.util.function.Function.identity; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpParamConfig { public String name; - public ValidatorConfig validator; + public ModelConfig model; public HttpParamConfig( String name, - ValidatorConfig validator) + ModelConfig model) { this.name = name; - this.validator = validator; + this.model = model; } public static HttpParamConfigBuilder builder() diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfigBuilder.java index 675a93f0ee..2f9b479914 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpParamConfigBuilder.java @@ -18,14 +18,14 @@ import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpParamConfigBuilder extends ConfigBuilder> { private final Function mapper; private String name; - private ValidatorConfig validator; + private ModelConfig model; HttpParamConfigBuilder( Function mapper) @@ -47,22 +47,22 @@ public HttpParamConfigBuilder name( return this; } - public HttpParamConfigBuilder validator( - ValidatorConfig validator) + public HttpParamConfigBuilder model( + ModelConfig model) { - this.validator = validator; + this.model = model; return this; } - public , C>> C validator( - Function>, C> validator) + public , C>> C model( + Function>, C> model) { - return validator.apply(this::validator); + return model.apply(this::model); } @Override public T build() { - return mapper.apply(new HttpParamConfig(name, validator)); + return mapper.apply(new HttpParamConfig(name, model)); } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfig.java index db804f4b75..0a8ec54812 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfig.java @@ -19,7 +19,7 @@ import java.util.List; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpRequestConfig { @@ -41,7 +41,7 @@ public enum Method public final List headers; public final List 
pathParams; public final List queryParams; - public final ValidatorConfig content; + public final ModelConfig content; public final List responses; public HttpRequestConfig( @@ -51,7 +51,7 @@ public HttpRequestConfig( List headers, List pathParams, List queryParams, - ValidatorConfig content, + ModelConfig content, List responses) { this.path = path; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfigBuilder.java index 439a2cfd53..b8ab42fc3e 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpRequestConfigBuilder.java @@ -20,7 +20,7 @@ import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpRequestConfigBuilder extends ConfigBuilder> { @@ -32,7 +32,7 @@ public class HttpRequestConfigBuilder extends ConfigBuilder headers; private List pathParams; private List queryParams; - private ValidatorConfig content; + private ModelConfig content; private List responses; HttpRequestConfigBuilder( @@ -151,14 +151,14 @@ public HttpRequestConfigBuilder queryParam( } public HttpRequestConfigBuilder content( - ValidatorConfig content) + ModelConfig content) { this.content = content; return this; } public , C>> C content( - Function>, C> content) + Function>, C> content) { return content.apply(this::content); } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfig.java index 51eaeb0832..a997f2d251 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfig.java @@ -19,20 +19,20 @@ import java.util.List; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpResponseConfig { public final List status; public final List contentType; public final List headers; - public final ValidatorConfig content; + public final ModelConfig content; public HttpResponseConfig( List status, List contentType, List headers, - ValidatorConfig content) + ModelConfig content) { this.status = status; this.contentType = contentType; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfigBuilder.java index 1515be59a3..dd36174bd8 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpResponseConfigBuilder.java @@ -20,7 +20,7 @@ import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class HttpResponseConfigBuilder 
extends ConfigBuilder> { @@ -29,7 +29,7 @@ public class HttpResponseConfigBuilder extends ConfigBuilder status; private List contentType; private List headers; - private ValidatorConfig content; + private ModelConfig content; HttpResponseConfigBuilder( Function mapper) @@ -91,14 +91,14 @@ public HttpParamConfigBuilder> header() public HttpResponseConfigBuilder content( - ValidatorConfig content) + ModelConfig content) { this.content = content; return this; } public , C>> C content( - Function>, C> content) + Function>, C> content) { return content.apply(this::content); } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/HttpConfiguration.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/HttpConfiguration.java index 58df0ec1cd..9a7eab101e 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/HttpConfiguration.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/HttpConfiguration.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.http.internal; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_VERBOSE; + import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.engine.Configuration; import io.aklivity.zilla.runtime.engine.EngineConfiguration; @@ -35,6 +37,7 @@ public class HttpConfiguration extends Configuration public static final IntPropertyDef HTTP_MAX_CONCURRENT_APPLICATION_HEADERS; public static final PropertyDef HTTP_SERVER_HEADER; public static final PropertyDef HTTP_USER_AGENT_HEADER; + public static final BooleanPropertyDef HTTP_VERBOSE; private static final ConfigurationDef HTTP_CONFIG; @@ -52,6 +55,7 @@ public class HttpConfiguration extends Configuration HTTP_MAX_CONCURRENT_STREAMS_CLEANUP = config.property("max.concurrent.streams.cleanup", 1000); HTTP_STREAMS_CLEANUP_DELAY = config.property("streams.cleanup.delay", 100); HTTP_MAX_CONCURRENT_APPLICATION_HEADERS = config.property("max.concurrent.application.headers", 10000); + HTTP_VERBOSE = config.property("verbose", HttpConfiguration::verboseDefault); HTTP_CONFIG = config; } @@ -122,4 +126,15 @@ public String16FW userAgentHeader() { return userAgentHeader; } + + public boolean verbose() + { + return HTTP_VERBOSE.get(this); + } + + private static boolean verboseDefault( + Configuration config) + { + return ENGINE_VERBOSE.getAsBoolean(config); + } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java index eee6f89c90..05a6eea2c4 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java @@ -26,14 +26,11 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; -import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.ToLongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.agrona.DirectBuffer; -import org.agrona.collections.MutableBoolean; import org.agrona.collections.Object2ObjectHashMap; import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; @@ -45,13 +42,12 @@ import 
io.aklivity.zilla.runtime.binding.http.config.HttpResponseConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.types.HttpHeaderFW; -import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; import io.aklivity.zilla.runtime.binding.http.internal.types.stream.HttpBeginExFW; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; public final class HttpBindingConfig { @@ -76,7 +72,7 @@ public final class HttpBindingConfig public HttpBindingConfig( BindingConfig binding, - BiFunction, Validator> createValidator) + Function supplyValidator) { this.id = binding.id; this.name = binding.name; @@ -86,7 +82,7 @@ public HttpBindingConfig( this.resolveId = binding.resolveId; this.credentials = options != null && options.authorization != null ? asAccessor(options.authorization.credentials) : DEFAULT_CREDENTIALS; - this.requests = createValidator == null ? null : createRequestTypes(createValidator); + this.requests = supplyValidator == null ? null : createRequestTypes(supplyValidator); } public HttpRouteConfig resolve( @@ -191,66 +187,63 @@ private Function, String> asAccessor( } private List createRequestTypes( - BiFunction, Validator> createValidator) + Function supplyValidator) { List requestTypes = new LinkedList<>(); if (this.options != null && this.options.requests != null) { for (HttpRequestConfig request : this.options.requests) { - Map headers = new HashMap<>(); + Map headers = new HashMap<>(); if (request.headers != null) { for (HttpParamConfig header : request.headers) { - headers.put(new String8FW(header.name), createValidator.apply(header.validator, this.resolveId)); + headers.put(new String8FW(header.name), supplyValidator.apply(header.model)); } } - Map pathParams = new Object2ObjectHashMap<>(); + + Map pathParams = new Object2ObjectHashMap<>(); if (request.pathParams != null) { for (HttpParamConfig pathParam : request.pathParams) { - pathParams.put(pathParam.name, createValidator.apply(pathParam.validator, this.resolveId)); + pathParams.put(pathParam.name, supplyValidator.apply(pathParam.model)); } } - Map queryParams = new TreeMap<>(QUERY_STRING_COMPARATOR); + + Map queryParams = new TreeMap<>(QUERY_STRING_COMPARATOR); if (request.queryParams != null) { for (HttpParamConfig queryParam : request.queryParams) { - queryParams.put(queryParam.name, createValidator.apply(queryParam.validator, this.resolveId)); + queryParams.put(queryParam.name, supplyValidator.apply(queryParam.model)); } } + List responses = new LinkedList<>(); if (request.responses != null) { for (HttpResponseConfig response0 : request.responses) { - Map responseHeaderValidators = new HashMap<>(); + Map responseHeaderValidators = new HashMap<>(); if (response0.headers != null) { for (HttpParamConfig header : response0.headers) { String8FW name = new String8FW(header.name); - Validator validator = createValidator.apply(header.validator, this.resolveId); + ValidatorHandler validator = supplyValidator.apply(header.model); if (validator != null) { responseHeaderValidators.put(name, validator); } } } - Validator contentValidator = null; - if 
(response0.content != null) - { - contentValidator = createValidator.apply(response0.content, this.resolveId); - } HttpRequestType.Response response = new HttpRequestType.Response(response0.status, response0.contentType, - responseHeaderValidators, contentValidator); + responseHeaderValidators, response0.content); responses.add(response); } } - Validator content = request.content == null ? null : createValidator.apply(request.content, this.resolveId); HttpRequestType requestType = HttpRequestType.builder() .path(request.path) .method(request.method) @@ -258,7 +251,7 @@ private List createRequestTypes( .headers(headers) .pathParams(pathParams) .queryParams(queryParams) - .content(content) + .content(request.content) .responses(responses) .build(); requestTypes.add(requestType); @@ -346,120 +339,6 @@ private boolean matchResponseContentType( return contentType == null || response.contentType == null || response.contentType.contains(contentType); } - public boolean validateResponseHeaders( - HttpRequestType.Response response, - HttpBeginExFW beginEx) - { - MutableBoolean valid = new MutableBoolean(true); - if (response != null && response.headers != null) - { - beginEx.headers().forEach(header -> - { - if (valid.value) - { - Validator validator = response.headers.get(header.name()); - if (validator != null) - { - String16FW value = header.value(); - valid.value &= validator.read(value.value(), value.offset(), value.length()); - } - } - }); - } - return valid.value; - } - - - public boolean validateHeaders( - HttpRequestType requestType, - HttpBeginExFW beginEx) - { - String path = beginEx.headers().matchFirst(h -> h.name().equals(HEADER_PATH)).value().asString(); - return requestType == null || - validateHeaderValues(requestType, beginEx) && - validatePathParams(requestType, path) && - validateQueryParams(requestType, path); - } - - private boolean validateHeaderValues( - HttpRequestType requestType, - HttpBeginExFW beginEx) - { - MutableBoolean valid = new MutableBoolean(true); - if (requestType != null && requestType.headers != null) - { - beginEx.headers().forEach(header -> - { - if (valid.value) - { - Validator validator = requestType.headers.get(header.name()); - if (validator != null) - { - String16FW value = header.value(); - valid.value &= validator.read(value.value(), value.offset(), value.length()); - } - } - }); - } - return valid.value; - } - - private boolean validatePathParams( - HttpRequestType requestType, - String path) - { - Matcher matcher = requestType.pathMatcher.reset(path); - boolean matches = matcher.matches(); - assert matches; - - boolean valid = true; - for (String name : requestType.pathParams.keySet()) - { - String value = matcher.group(name); - if (value != null) - { - String8FW value0 = new String8FW(value); - Validator validator = requestType.pathParams.get(name); - if (!validator.read(value0.value(), value0.offset(), value0.length())) - { - valid = false; - break; - } - } - } - return valid; - } - - private boolean validateQueryParams( - HttpRequestType requestType, - String path) - { - Matcher matcher = requestType.queryMatcher.reset(path); - boolean valid = true; - while (valid && matcher.find()) - { - String name = matcher.group(1); - Validator validator = requestType.queryParams.get(name); - if (validator != null) - { - String8FW value = new String8FW(matcher.group(2)); - valid &= validator.read(value.value(), value.offset(), value.length()); - } - } - return valid; - } - - public boolean validateContent( - HttpRequestType requestType, - DirectBuffer 
buffer, - int index, - int length) - { - return requestType == null || - requestType.content == null || - requestType.content.read(buffer, index, length); - } - private static Function, String> orElseIfNull( Function, String> first, Function, String> second) diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapter.java index f93302ec48..a9b675bae9 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapter.java @@ -31,8 +31,8 @@ import io.aklivity.zilla.runtime.binding.http.config.HttpParamConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpRequestConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpResponseConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapter; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapter; public class HttpRequestConfigAdapter implements JsonbAdapter { @@ -46,7 +46,7 @@ public class HttpRequestConfigAdapter implements JsonbAdapter ((JsonString) i).getString()) .collect(Collectors.toList()); } - ValidatorConfig content = null; + ModelConfig content = null; if (object.containsKey(CONTENT_NAME)) { JsonValue contentJson = object.get(CONTENT_NAME); - content = validator.adaptFromJson(contentJson); + content = model.adaptFromJson(contentJson); } List headers = null; if (object.containsKey(HEADERS_NAME)) @@ -156,7 +156,7 @@ public HttpRequestConfig adaptFromJson( { HttpParamConfig header = HttpParamConfig.builder() .name(entry.getKey()) - .validator(validator.adaptFromJson(entry.getValue())) + .model(model.adaptFromJson(entry.getValue())) .build(); headers.add(header); } @@ -174,7 +174,7 @@ public HttpRequestConfig adaptFromJson( { HttpParamConfig pathParam = HttpParamConfig.builder() .name(entry.getKey()) - .validator(validator.adaptFromJson(entry.getValue())) + .model(model.adaptFromJson(entry.getValue())) .build(); pathParams.add(pathParam); } @@ -187,7 +187,7 @@ public HttpRequestConfig adaptFromJson( { HttpParamConfig queryParam = HttpParamConfig.builder() .name(entry.getKey()) - .validator(validator.adaptFromJson(entry.getValue())) + .model(model.adaptFromJson(entry.getValue())) .build(); queryParams.add(queryParam); } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestType.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestType.java index 455f7803b9..d5179f9b9d 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestType.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestType.java @@ -22,7 +22,8 @@ import io.aklivity.zilla.runtime.binding.http.config.HttpRequestConfig; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; public final class HttpRequestType { @@ 
-42,11 +43,11 @@ public final class HttpRequestType public final Matcher pathMatcher; public final Matcher queryMatcher; - // request validators - public final Map headers; - public final Map pathParams; - public final Map queryParams; - public final Validator content; + // validators + public final Map headers; + public final Map pathParams; + public final Map queryParams; + public final ModelConfig content; // responses public final List responses; @@ -57,10 +58,10 @@ private HttpRequestType( List contentType, Matcher pathMatcher, Matcher queryMatcher, - Map headers, - Map pathParams, - Map queryParams, - Validator content, + Map headers, + Map pathParams, + Map queryParams, + ModelConfig content, List responses) { this.path = path; @@ -79,14 +80,14 @@ public static final class Response { public final List status; public final List contentType; - public final Map headers; - public final Validator content; + public final Map headers; + public final ModelConfig content; public Response( List status, List contentType, - Map headers, - Validator content) + Map headers, + ModelConfig content) { this.status = status; this.contentType = contentType; @@ -105,10 +106,10 @@ public static final class Builder private String path; private HttpRequestConfig.Method method; private List contentType; - private Map headers; - private Map pathParams; - private Map queryParams; - private Validator content; + private Map headers; + private Map pathParams; + private Map queryParams; + private ModelConfig content; private List responses; public Builder path( @@ -133,28 +134,28 @@ public Builder contentType( } public Builder headers( - Map headers) + Map headers) { this.headers = headers; return this; } public Builder pathParams( - Map pathParams) + Map pathParams) { this.pathParams = pathParams; return this; } public Builder queryParams( - Map queryParams) + Map queryParams) { this.queryParams = queryParams; return this; } public Builder content( - Validator content) + ModelConfig content) { this.content = content; return this; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpResponseConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpResponseConfigAdapter.java index b6dd881fff..a5be563be5 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpResponseConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpResponseConfigAdapter.java @@ -31,8 +31,8 @@ import io.aklivity.zilla.runtime.binding.http.config.HttpParamConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpResponseConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapter; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapter; public class HttpResponseConfigAdapter implements JsonbAdapter { @@ -41,7 +41,7 @@ public class HttpResponseConfigAdapter implements JsonbAdapter, Validator> createValidator; + private final Function supplyValidator; private final EnumMap decodersByFrameType; { @@ -311,6 +310,8 @@ public final class HttpClientFactory implements HttpStreamFactory private final Map headersMap; private final String16FW h2cSettingsPayload; private final HttpConfiguration config; + private final EngineContext context; + private final boolean verbose; 
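
The HttpRequestType changes above capture the split this refactor makes: header, path, and query validators are resolved once per binding and shared across exchanges, while body content stays a declarative ModelConfig and each exchange resolves its own ValidatorHandler, presumably because content validation keeps state across message fragments. A sketch of the two lifetimes, with supplyValidator standing in for EngineContext::supplyValidator:

    // Binding scope: resolved once in createRequestTypes(...) and shared.
    Map<String8FW, ValidatorHandler> headers = new HashMap<>();
    for (HttpParamConfig header : request.headers)
    {
        headers.put(new String8FW(header.name), supplyValidator.apply(header.model));
    }

    // Exchange scope: a fresh handler per request or response body, resolved
    // from the ModelConfig kept on HttpRequestType.
    ValidatorHandler contentType = requestType != null && requestType.content != null
        ? supplyValidator.apply(requestType.content)
        : null;
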
private final Http2Settings initialSettings; private final MutableDirectBuffer frameBuffer; private final MutableDirectBuffer writeBuffer; @@ -345,6 +346,7 @@ public HttpClientFactory( HttpConfiguration config, EngineContext context) { + this.context = context; this.config = config; this.proxyTypeId = context.supplyTypeId("proxy"); this.writeBuffer = context.writeBuffer(); @@ -377,7 +379,8 @@ public HttpClientFactory( this.maximumPushPromiseListSize = config.maxPushPromiseListSize(); this.decodeMax = bufferPool.slotCapacity(); this.encodeMax = bufferPool.slotCapacity(); - this.createValidator = context::createValidator; + this.supplyValidator = context::supplyValidator; + this.verbose = config.verbose(); final byte[] settingsPayload = new byte[12]; http2SettingsRW.wrap(frameBuffer, 0, frameBuffer.capacity()) @@ -398,7 +401,7 @@ public int originTypeId() public void attach( BindingConfig binding) { - HttpBindingConfig httpBinding = new HttpBindingConfig(binding, createValidator); + HttpBindingConfig httpBinding = new HttpBindingConfig(binding, supplyValidator); bindings.put(binding.id, httpBinding); } @@ -2902,7 +2905,7 @@ private void onDecodeHttp11Headers( } else { - exchange.cleanup(traceId, authorization); + exchange.onResponseInvalid(traceId, authorization); decoder = decodeHttp11Ignore; } } @@ -2928,7 +2931,7 @@ private int onDecodeHttp11Body( boolean valid = true; if (exchange.response != null && exchange.response.content != null) { - valid = exchange.response.content.read(buffer, offset, limit - offset); + valid = exchange.validateResponseContent(buffer, offset, limit - offset); } if (valid) { @@ -2936,7 +2939,7 @@ private int onDecodeHttp11Body( } else { - exchange.doResponseAbort(traceId, authorization, EMPTY_OCTETS); + exchange.onResponseInvalid(traceId, authorization); result = limit; } return result; @@ -3364,7 +3367,7 @@ private int onDecodeHttp2Data( boolean valid = true; if (exchange.response != null && exchange.response.content != null) { - valid = exchange.response.content.read(payload, 0, payloadLength); + valid = exchange.validateResponseContent(payload, 0, payloadLength); } if (valid) { @@ -3377,7 +3380,7 @@ private int onDecodeHttp2Data( } else { - exchange.cleanup(traceId, authorization); + exchange.onResponseInvalid(traceId, authorization); progress += payloadLength; } } @@ -3487,8 +3490,7 @@ else if (headersDecoder.httpError()) } else { - exchange.doResponseAbort(traceId, authorization, EMPTY_OCTETS); - exchange.doRequestReset(traceId, authorization); + exchange.onResponseInvalid(traceId, authorization); doEncodeHttp2RstStream(traceId, streamId, Http2ErrorCode.CANCEL); decoder = decodeHttp2IgnoreAll; } @@ -4516,6 +4518,7 @@ private final class HttpExchange private final HttpBindingConfig binding; private HttpRequestType requestType; private HttpRequestType.Response response; + private ValidatorHandler contentType; private HttpExchange( HttpClient client, @@ -5071,12 +5074,54 @@ public void resolveResponse( HttpBeginExFW beginEx) { this.response = binding.resolveResponse(requestType, beginEx); + this.contentType = response != null && response.content != null + ? 
supplyValidator.apply(response.content) + : null; } public boolean validateResponseHeaders( HttpBeginExFW beginEx) { - return binding.validateResponseHeaders(response, beginEx); + MutableBoolean valid = new MutableBoolean(true); + if (response != null && response.headers != null) + { + beginEx.headers().forEach(header -> + { + if (valid.value) + { + ValidatorHandler validator = response.headers.get(header.name()); + if (validator != null) + { + String16FW value = header.value(); + valid.value &= + validator.validate(value.value(), value.offset(), value.length(), ValueConsumer.NOP); + } + } + }); + } + return valid.value; + } + + private boolean validateResponseContent( + DirectBuffer buffer, + int index, + int length) + { + return contentType == null || + contentType.validate(buffer, index, length, ValueConsumer.NOP); + } + + private void onResponseInvalid( + long traceId, + long authorization) + { + if (verbose) + { + System.out.printf("%s:%s %s: Skipping invalid response on method %s, path %s\n", + System.currentTimeMillis(), context.supplyNamespace(routedId), + context.supplyLocalName(routedId), requestType.method, requestType.path); + } + cleanup(traceId, authorization); } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java index 76cfa97c84..39b9bea739 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java @@ -57,13 +57,11 @@ import java.util.Set; import java.util.SortedSet; import java.util.function.BiConsumer; -import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; -import java.util.function.ToLongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -142,9 +140,10 @@ import io.aklivity.zilla.runtime.engine.buffer.BufferPool; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.guard.GuardHandler; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public final class HttpServerFactory implements HttpStreamFactory { @@ -500,7 +499,7 @@ public final class HttpServerFactory implements HttpStreamFactory private final Http2ServerDecoder decodeHttp2IgnoreAll = this::decodeHttp2IgnoreAll; private final EnumMap decodersByFrameType; - private final BiFunction, Validator> createValidator; + private final Function supplyValidator; { final EnumMap decodersByFrameType = new EnumMap<>(Http2FrameType.class); @@ -574,8 +573,8 @@ public HttpServerFactory( this.connectionClose = CONNECTION_CLOSE_PATTERN.matcher(""); this.maximumHeadersSize = bufferPool.slotCapacity(); this.decodeMax = bufferPool.slotCapacity(); + this.supplyValidator = context::supplyValidator; this.encodeMax = bufferPool.slotCapacity(); - this.createValidator = context::createValidator; 
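
The HttpClientFactory hunks above funnel every invalid-response path through onResponseInvalid, so the optional verbose diagnostic and the stream cleanup happen in one place. The resulting flow, summarized as a sketch using the names from those hunks:

    // On response headers:
    exchange.resolveResponse(beginEx);                       // picks Response + content handler
    if (!exchange.validateResponseHeaders(beginEx))
    {
        exchange.onResponseInvalid(traceId, authorization);  // logs when verbose, then cleans up
    }

    // On each body chunk (HTTP/1.1 body or HTTP/2 DATA frame):
    if (!exchange.validateResponseContent(payload, 0, payloadLength))
    {
        exchange.onResponseInvalid(traceId, authorization);
    }
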
this.bindings = new Long2ObjectHashMap<>(); this.headers200 = initHeaders(config, STATUS_200); @@ -598,7 +597,7 @@ public int routedTypeId() public void attach( BindingConfig binding) { - HttpBindingConfig httpBinding = new HttpBindingConfig(binding, createValidator); + HttpBindingConfig httpBinding = new HttpBindingConfig(binding, supplyValidator); bindings.put(binding.id, httpBinding); } @@ -1076,9 +1075,9 @@ else if (!isCorsRequestAllowed(server.binding, headers)) HttpPolicyConfig policy = binding.access().effectivePolicy(headers); final String origin = policy == CROSS_ORIGIN ? headers.get(HEADER_NAME_ORIGIN) : null; - server.requestType = binding.resolveRequestType(beginEx); + HttpRequestType requestType = binding.resolveRequestType(beginEx); boolean headersValid = server.onDecodeHeaders(server.routedId, route.id, traceId, exchangeAuth, - policy, origin, beginEx); + policy, origin, beginEx, requestType); if (!headersValid) { error = response400; @@ -1583,7 +1582,6 @@ private final class HttpServer private long replyAck; private long replyBudgetId; private int replyMax; - private HttpRequestType requestType; private HttpServer( HttpBindingConfig binding, @@ -2256,12 +2254,14 @@ private boolean onDecodeHeaders( long authorization, HttpPolicyConfig policy, String origin, - HttpBeginExFW beginEx) + HttpBeginExFW beginEx, + HttpRequestType requestType) { - boolean headersValid = binding.validateHeaders(requestType, beginEx); + final HttpExchange exchange = new HttpExchange(originId, routedId, authorization, + traceId, policy, origin, requestType); + boolean headersValid = exchange.validateHeaders(beginEx); if (headersValid) { - final HttpExchange exchange = new HttpExchange(originId, routedId, authorization, traceId, policy, origin); exchange.doRequestBegin(traceId, beginEx); exchange.doResponseWindow(traceId); @@ -2293,7 +2293,7 @@ private int onDecodeBody( int limit, Flyweight extension) { - boolean contentValid = binding.validateContent(requestType, buffer, 0, limit - offset); + boolean contentValid = exchange.validateContent(buffer, 0, limit - offset); int result; if (contentValid) { @@ -2729,6 +2729,8 @@ private final class HttpExchange private final long sessionId; private final HttpPolicyConfig policy; private final String origin; + private final HttpRequestType requestType; + private final ValidatorHandler contentType; private long expiringId; @@ -2755,13 +2757,15 @@ private HttpExchange( long sessionId, long traceId, HttpPolicyConfig policy, - String origin) + String origin, + HttpRequestType requestType) { this.originId = originId; this.routedId = routedId; this.sessionId = sessionId; this.policy = policy; this.origin = origin; + this.requestType = requestType; this.requestId = supplyInitialId.applyAsLong(routedId); this.responseId = supplyReplyId.applyAsLong(requestId); this.requestState = HttpExchangeState.PENDING; @@ -2771,6 +2775,9 @@ private HttpExchange( this.responseRemaining = Integer.MAX_VALUE - encodeMax; this.expiringId = expireIfNecessary(guard, sessionId, originId, routedId, replyId, traceId, 0); + this.contentType = requestType != null && requestType.content != null + ? 
supplyValidator.apply(requestType.content) + : null; } private void doRequestBegin( @@ -3135,6 +3142,91 @@ private void doResponseChallenge( traceId, sessionId, httpChallengeEx); } + private boolean validateHeaders( + HttpBeginExFW beginEx) + { + String path = beginEx.headers().matchFirst(h -> h.name().equals(HEADER_PATH)).value().asString(); + return requestType == null || + validateHeaderValues(beginEx) && + validatePathParams(path) && + validateQueryParams(path); + } + + private boolean validateHeaderValues( + HttpBeginExFW beginEx) + { + MutableBoolean valid = new MutableBoolean(true); + if (requestType != null && requestType.headers != null) + { + beginEx.headers().forEach(header -> + { + if (valid.value) + { + ValidatorHandler validator = requestType.headers.get(header.name()); + if (validator != null) + { + String16FW value = header.value(); + valid.value &= + validator.validate(value.value(), value.offset(), value.length(), ValueConsumer.NOP); + } + } + }); + } + return valid.value; + } + + private boolean validatePathParams( + String path) + { + Matcher matcher = requestType.pathMatcher.reset(path); + boolean matches = matcher.matches(); + assert matches; + + boolean valid = true; + for (String name : requestType.pathParams.keySet()) + { + String value = matcher.group(name); + if (value != null) + { + String8FW value0 = new String8FW(value); + ValidatorHandler validator = requestType.pathParams.get(name); + if (!validator.validate(value0.value(), value0.offset(), value0.length(), ValueConsumer.NOP)) + { + valid = false; + break; + } + } + } + return valid; + } + + private boolean validateQueryParams( + String path) + { + Matcher matcher = requestType.queryMatcher.reset(path); + boolean valid = true; + while (valid && matcher.find()) + { + String name = matcher.group(1); + ValidatorHandler validator = requestType.queryParams.get(name); + if (validator != null) + { + String8FW value = new String8FW(matcher.group(2)); + valid &= validator.validate(value.value(), value.offset(), value.length(), ValueConsumer.NOP); + } + } + return valid; + } + + private boolean validateContent( + DirectBuffer buffer, + int index, + int length) + { + return contentType == null || + contentType.validate(buffer, index, length, ValueConsumer.NOP); + } + private void cleanupExpiringIfNecessary() { if (expiringId != NO_CANCEL_ID) @@ -4909,7 +5001,7 @@ else if (!isCorsRequestAllowed(binding, headers)) final Http2Exchange exchange = new Http2Exchange(originId, routedId, NO_REQUEST_ID, streamId, exchangeAuth, traceId, policy, origin, contentLength, requestType); - boolean headersValid = binding.validateHeaders(requestType, beginEx); + boolean headersValid = exchange.validateHeaders(beginEx); if (headersValid) { exchange.doRequestBegin(traceId, beginEx); @@ -5123,7 +5215,7 @@ private int onDecodeData( else { final int payloadLength = payload.capacity(); - boolean contentValid = binding.validateContent(exchange.request, payload, 0, payloadLength); + boolean contentValid = exchange.validateContent(payload, 0, payloadLength); if (contentValid) { if (payloadLength > 0) @@ -5580,6 +5672,8 @@ private final class Http2Exchange private final String origin; private final long requestContentLength; private final long sessionId; + private final HttpRequestType requestType; + private final ValidatorHandler contentType; private long responseContentLength; private long responseContentObserved; @@ -5606,8 +5700,6 @@ private final class Http2Exchange private long responseAck; private int responseMax; - private final 
HttpRequestType request; - private Http2Exchange( long originId, long routedId, @@ -5618,7 +5710,7 @@ private Http2Exchange( HttpPolicyConfig policy, String origin, long requestContentLength, - HttpRequestType request) + HttpRequestType requestType) { this.originId = originId; this.routedId = routedId; @@ -5630,7 +5722,10 @@ private Http2Exchange( this.requestId = requestId == NO_REQUEST_ID ? supplyInitialId.applyAsLong(routedId) : requestId; this.responseId = supplyReplyId.applyAsLong(this.requestId); this.expiringId = expireIfNecessary(guard, sessionId, originId, routedId, replyId, traceId, streamId); - this.request = request; + this.requestType = requestType; + this.contentType = requestType != null && requestType.content != null + ? supplyValidator.apply(requestType.content) + : null; } private int initialWindow() @@ -6167,6 +6262,91 @@ private void setResponseClosed() cleanupExpiringIfNecessary(); } + private boolean validateHeaders( + HttpBeginExFW beginEx) + { + String path = beginEx.headers().matchFirst(h -> h.name().equals(HEADER_PATH)).value().asString(); + return requestType == null || + validateHeaderValues(beginEx) && + validatePathParams(path) && + validateQueryParams(path); + } + + private boolean validateHeaderValues( + HttpBeginExFW beginEx) + { + MutableBoolean valid = new MutableBoolean(true); + if (requestType != null && requestType.headers != null) + { + beginEx.headers().forEach(header -> + { + if (valid.value) + { + ValidatorHandler validator = requestType.headers.get(header.name()); + if (validator != null) + { + String16FW value = header.value(); + valid.value &= + validator.validate(value.value(), value.offset(), value.length(), ValueConsumer.NOP); + } + } + }); + } + return valid.value; + } + + private boolean validatePathParams( + String path) + { + Matcher matcher = requestType.pathMatcher.reset(path); + boolean matches = matcher.matches(); + assert matches; + + boolean valid = true; + for (String name : requestType.pathParams.keySet()) + { + String value = matcher.group(name); + if (value != null) + { + String8FW value0 = new String8FW(value); + ValidatorHandler validator = requestType.pathParams.get(name); + if (!validator.validate(value0.value(), value0.offset(), value0.length(), ValueConsumer.NOP)) + { + valid = false; + break; + } + } + } + return valid; + } + + private boolean validateQueryParams( + String path) + { + Matcher matcher = requestType.queryMatcher.reset(path); + boolean valid = true; + while (valid && matcher.find()) + { + String name = matcher.group(1); + ValidatorHandler validator = requestType.queryParams.get(name); + if (validator != null) + { + String8FW value = new String8FW(matcher.group(2)); + valid &= validator.validate(value.value(), value.offset(), value.length(), ValueConsumer.NOP); + } + } + return valid; + } + + private boolean validateContent( + DirectBuffer buffer, + int index, + int length) + { + return contentType == null || + contentType.validate(buffer, index, length, ValueConsumer.NOP); + } + private void removeStreamIfNecessary() { if (HttpState.closed(state)) diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java index 451c7668e6..2fd6c947ab 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java +++ 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java @@ -42,7 +42,7 @@ import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; public class HttpOptionsConfigAdapterTest { @@ -158,16 +158,16 @@ public void shouldReadOptions() assertThat(request.method, equalTo(HttpRequestConfig.Method.GET)); assertThat(request.contentType.get(0), equalTo("application/json")); assertThat(request.headers.get(0).name, equalTo("content-type")); - assertThat(request.headers.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.headers.get(0).validator.type, equalTo("test")); + assertThat(request.headers.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.headers.get(0).model.model, equalTo("test")); assertThat(request.pathParams.get(0).name, equalTo("id")); - assertThat(request.pathParams.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.pathParams.get(0).validator.type, equalTo("test")); + assertThat(request.pathParams.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.pathParams.get(0).model.model, equalTo("test")); assertThat(request.queryParams.get(0).name, equalTo("index")); - assertThat(request.queryParams.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.queryParams.get(0).validator.type, equalTo("test")); - assertThat(request.content, instanceOf(TestValidatorConfig.class)); - assertThat(request.content.type, equalTo("test")); + assertThat(request.queryParams.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.queryParams.get(0).model.model, equalTo("test")); + assertThat(request.content, instanceOf(TestModelConfig.class)); + assertThat(request.content.model, equalTo("test")); } @Test @@ -280,20 +280,20 @@ public void shouldWriteOptions() .contentType("application/json") .header() .name("content-type") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() .pathParam() .name("id") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() .queryParam() .name("index") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() - .content(TestValidatorConfig::builder) + .content(TestModelConfig::builder) .build() .build() .build(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapterTest.java index 66039ea7e4..df829c14d5 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRequestConfigAdapterTest.java @@ -29,7 +29,7 @@ import org.junit.Test; import io.aklivity.zilla.runtime.binding.http.config.HttpRequestConfig; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; +import 
io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; public class HttpRequestConfigAdapterTest { @@ -104,24 +104,24 @@ public void shouldReadOptions() assertThat(request.method, equalTo(HttpRequestConfig.Method.GET)); assertThat(request.contentType.get(0), equalTo("application/json")); assertThat(request.headers.get(0).name, equalTo("content-type")); - assertThat(request.headers.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.headers.get(0).validator.type, equalTo("test")); + assertThat(request.headers.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.headers.get(0).model.model, equalTo("test")); assertThat(request.pathParams.get(0).name, equalTo("id")); - assertThat(request.pathParams.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.pathParams.get(0).validator.type, equalTo("test")); + assertThat(request.pathParams.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.pathParams.get(0).model.model, equalTo("test")); assertThat(request.queryParams.get(0).name, equalTo("index")); - assertThat(request.queryParams.get(0).validator, instanceOf(TestValidatorConfig.class)); - assertThat(request.queryParams.get(0).validator.type, equalTo("test")); - assertThat(request.content, instanceOf(TestValidatorConfig.class)); - assertThat(request.content.type, equalTo("test")); + assertThat(request.queryParams.get(0).model, instanceOf(TestModelConfig.class)); + assertThat(request.queryParams.get(0).model.model, equalTo("test")); + assertThat(request.content, instanceOf(TestModelConfig.class)); + assertThat(request.content.model, equalTo("test")); assertThat(request.responses.get(0).status.get(0), equalTo("200")); assertThat(request.responses.get(0).contentType.get(0), equalTo("application/json")); assertThat(request.responses.get(0).headers.get(0).name, equalTo("content-type")); - assertThat(request.responses.get(0).headers.get(0).validator.type, equalTo("test")); - assertThat(request.responses.get(0).content.type, equalTo("test")); + assertThat(request.responses.get(0).headers.get(0).model.model, equalTo("test")); + assertThat(request.responses.get(0).content.model, equalTo("test")); assertThat(request.responses.get(1).status.get(0), equalTo("401")); assertThat(request.responses.get(1).status.get(1), equalTo("404")); - assertThat(request.responses.get(1).content.type, equalTo("test")); + assertThat(request.responses.get(1).content.model, equalTo("test")); } @Test @@ -182,36 +182,36 @@ public void shouldWriteOptions() .contentType("application/json") .header() .name("content-type") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() .pathParam() .name("id") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() .queryParam() .name("index") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() - .content(TestValidatorConfig::builder) + .content(TestModelConfig::builder) .build() .response() .status(200) .contentType("application/json") .header() .name("content-type") - .validator(TestValidatorConfig::builder) + .model(TestModelConfig::builder) .build() .build() - .content(TestValidatorConfig::builder) + .content(TestModelConfig::builder) .build() .build() .response() .status(401) .status(404) - .content(TestValidatorConfig::builder) + .content(TestModelConfig::builder) .build() .build() .build(); diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ValidationIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ValidationIT.java index 5956500d0e..e82208c3dd 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ValidationIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ValidationIT.java @@ -50,7 +50,7 @@ public class ValidationIT public final TestRule chain = outerRule(engine).around(k3po).around(timeout); @Test - @Configuration("server.validation.yaml") + @Configuration("server.model.yaml") @Specification({ "${net}/invalid.request/client", "${app}/invalid.request/server" }) @@ -60,7 +60,7 @@ public void shouldRejectInvalidRequests() throws Exception } @Test - @Configuration("server.validation.yaml") + @Configuration("server.model.yaml") @Specification({ "${net}/valid.request/client", "${app}/valid.request/server" }) diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java index 83411f0191..a445a43a84 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java @@ -20,6 +20,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.DisableOnDebug; @@ -52,6 +53,7 @@ public class StartingIT @Rule public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + @Ignore("Github Actions") @Test @Configuration("client.yaml") @Specification({ diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java index 2ff7eb5ca2..cde9d6e9bd 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java @@ -19,6 +19,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.DisableOnDebug; @@ -59,6 +60,7 @@ public void streamFlow() throws Exception k3po.finish(); } + @Ignore("Github Actions") @Test @Configuration("server.yaml") @Specification({ diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ValidationIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ValidationIT.java index c694c8956a..a2e427ad51 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ValidationIT.java +++ 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ValidationIT.java @@ -50,7 +50,7 @@ public class ValidationIT public final TestRule chain = outerRule(engine).around(k3po).around(timeout); @Test - @Configuration("server.validation.yaml") + @Configuration("server.model.yaml") @Specification({ "${net}/invalid.request/client", "${app}/invalid.request/server" }) @@ -60,7 +60,7 @@ public void shouldRejectInvalidRequests() throws Exception } @Test - @Configuration("server.validation.yaml") + @Configuration("server.model.yaml") @Specification({ "${net}/valid.request/client", "${app}/valid.request/server" }) diff --git a/runtime/binding-kafka-grpc/pom.xml b/runtime/binding-kafka-grpc/pom.xml index c734889816..d7701669e8 100644 --- a/runtime/binding-kafka-grpc/pom.xml +++ b/runtime/binding-kafka-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-kafka/pom.xml b/runtime/binding-kafka/pom.xml index 706bd03379..78afef40fd 100644 --- a/runtime/binding-kafka/pom.xml +++ b/runtime/binding-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml @@ -27,7 +27,7 @@ 11 11 0.79 - 3 + 5 diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java index 6214e58282..bba5a40275 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java @@ -15,7 +15,12 @@ */ package io.aklivity.zilla.runtime.binding.kafka.config; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; + import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; @@ -32,6 +37,12 @@ public KafkaOptionsConfig( List servers, KafkaSaslConfig sasl) { + super(topics != null && !topics.isEmpty() + ? 
topics.stream() + .flatMap(t -> Stream.of(t.key, t.value)) + .filter(Objects::nonNull) + .collect(toList()) + : emptyList()); this.bootstrap = bootstrap; this.topics = topics; this.servers = servers; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java index d35c471173..8eafe82740 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java @@ -19,22 +19,22 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaOffsetType; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class KafkaTopicConfig { public final String name; public final KafkaOffsetType defaultOffset; public final KafkaDeltaType deltaType; - public final ValidatorConfig key; - public final ValidatorConfig value; + public final ModelConfig key; + public final ModelConfig value; public KafkaTopicConfig( String name, KafkaOffsetType defaultOffset, KafkaDeltaType deltaType, - ValidatorConfig key, - ValidatorConfig value) + ModelConfig key, + ModelConfig value) { this.name = name; this.defaultOffset = defaultOffset; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplier.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplier.java new file mode 100644 index 0000000000..4a20d002da --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplier.java @@ -0,0 +1,107 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.identity; + +import static io.aklivity.zilla.runtime.common.feature.FeatureFilter.filter; +import static java.util.ServiceLoader.load; + +import java.util.ArrayList; +import java.util.List; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.engine.Configuration; + +public final class KafkaClientIdSupplier +{ + public static KafkaClientIdSupplier instantiate( + Configuration config) + { + return instantiate(config, filter(load(KafkaClientIdSupplierFactorySpi.class))); + } + + private final List suppliers; + + public String get( + String server) + { + String clientId = null; + + match: + for (int index = 0; index < suppliers.size(); index++) + { + KafkaClientIdSupplierSpi supplier = suppliers.get(index); + if (supplier.matches(server)) + { + clientId = supplier.get(); + break match; + } + } + + return clientId; + } + + private KafkaClientIdSupplier( + List suppliers) + { + this.suppliers = suppliers; + } + + private static KafkaClientIdSupplier instantiate( + Configuration config, + Iterable factories) + { + List suppliers = new ArrayList<>(); + + KafkaConfiguration kafka = new KafkaConfiguration(config); + String clientId = kafka.clientId(); + + if (clientId != null) + { + suppliers.add(new Fixed(clientId)); + } + + for (KafkaClientIdSupplierFactorySpi factory : factories) + { + suppliers.add(factory.create(config)); + } + + return new KafkaClientIdSupplier(suppliers); + } + + private static final class Fixed implements KafkaClientIdSupplierSpi + { + private final String clientId; + + private Fixed( + String clientId) + { + this.clientId = clientId; + } + + @Override + public boolean matches( + String server) + { + return true; + } + + @Override + public String get() + { + return clientId; + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierFactorySpi.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierFactorySpi.java new file mode 100644 index 0000000000..fd95794276 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierFactorySpi.java @@ -0,0 +1,24 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.identity; + +import io.aklivity.zilla.runtime.engine.Configuration; + +public interface KafkaClientIdSupplierFactorySpi +{ + KafkaClientIdSupplierSpi create( + Configuration config); +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierSpi.java similarity index 73% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfig.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierSpi.java index 987b89007f..d2713f9809 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierSpi.java @@ -13,15 +13,14 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.config; +package io.aklivity.zilla.runtime.binding.kafka.identity; -public abstract class ValidatorConfig +import java.util.function.Supplier; + +public interface KafkaClientIdSupplierSpi extends Supplier<String> { - public final String type; + boolean matches( + String server); - public ValidatorConfig( - String type) - { - this.type = type; - } + String get(); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java index 789c008afc..01d5c29489 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.binding.kafka.internal; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_CACHE_DIRECTORY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_VERBOSE; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -37,6 +38,8 @@ public class KafkaConfiguration extends Configuration public static final boolean DEBUG = Boolean.getBoolean("zilla.binding.kafka.debug"); public static final boolean DEBUG_PRODUCE = DEBUG || Boolean.getBoolean("zilla.binding.kafka.debug.produce"); + public static final String KAFKA_CLIENT_ID_DEFAULT = "zilla"; + public static final IntPropertyDef KAFKA_CLIENT_MAX_IDLE_MILLIS; public static final LongPropertyDef KAFKA_CLIENT_CONNECTION_POOL_CLEANUP_MILLIS; public static final IntPropertyDef KAFKA_CLIENT_META_MAX_AGE_MILLIS; @@ -73,13 +76,14 @@ public class KafkaConfiguration extends Configuration public static final PropertyDef<String> KAFKA_CLIENT_ID; public static final PropertyDef<InstanceIdSupplier> KAFKA_CLIENT_INSTANCE_ID; public static final BooleanPropertyDef KAFKA_CLIENT_CONNECTION_POOL; + public static final BooleanPropertyDef KAFKA_VERBOSE; private static final ConfigurationDef KAFKA_CONFIG; static { final ConfigurationDef config = new ConfigurationDef("zilla.binding.kafka"); - KAFKA_CLIENT_ID = config.property("client.id", "zilla"); + KAFKA_CLIENT_ID = config.property("client.id"); KAFKA_CLIENT_INSTANCE_ID = config.property(InstanceIdSupplier.class, "client.instance.id", KafkaConfiguration::decodeInstanceId,
KafkaConfiguration::defaultInstanceId); KAFKA_CLIENT_MAX_IDLE_MILLIS = config.property("client.max.idle.ms", 1 * 60 * 1000); @@ -122,6 +126,7 @@ public class KafkaConfiguration extends Configuration KAFKA_CACHE_SEGMENT_INDEX_BYTES = config.property("cache.segment.index.bytes", 0xA00000); KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX = config.property("cache.client.trailers.size.max", 256); KAFKA_CLIENT_CONNECTION_POOL = config.property("client.connection.pool", true); + KAFKA_VERBOSE = config.property("verbose", KafkaConfiguration::supplyVerbose); KAFKA_CONFIG = config; } @@ -196,6 +201,11 @@ public Path cacheDirectory() return KAFKA_CACHE_DIRECTORY.get(this); } + public boolean verbose() + { + return KAFKA_VERBOSE.get(this); + } + public long cacheProduceCapacity() { return KAFKA_CACHE_PRODUCE_CAPACITY.get(this); @@ -305,6 +315,12 @@ public int clientGroupMaxSessionTimeoutDefault() return KAFKA_CLIENT_GROUP_MAX_SESSION_TIMEOUT_DEFAULT.get(this); } + private static boolean supplyVerbose( + Configuration config) + { + return ENGINE_VERBOSE.getAsBoolean(config); + } + private static Path cacheDirectory( Configuration config, String cacheDirectory) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheCursorFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheCursorFactory.java index 81bc25ebb9..618d265fc7 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheCursorFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheCursorFactory.java @@ -54,10 +54,14 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheDeltaFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCachePaddedValueFW; public final class KafkaCacheCursorFactory { + private static final int NO_CONVERTED_POSITION = -1; + private final KafkaCacheDeltaFW deltaRO = new KafkaCacheDeltaFW(); + private final KafkaCachePaddedValueFW convertedRO = new KafkaCachePaddedValueFW(); private final KafkaValueMatchFW valueMatchRO = new KafkaValueMatchFW(); private final KafkaHeaderFW headerRO = new KafkaHeaderFW(); @@ -68,9 +72,9 @@ public final class KafkaCacheCursorFactory public static final int INDEX_UNSET = -1; public KafkaCacheCursorFactory( - MutableDirectBuffer writeBuffer) + int writeCapacity) { - this.writeBuffer = writeBuffer; + this.writeBuffer = new UnsafeBuffer(ByteBuffer.allocate(writeCapacity)); this.checksum = new CRC32C(); } @@ -212,9 +216,16 @@ public KafkaCacheEntryFW next( nextEntry = null; } - if (nextEntry != null && deltaType != KafkaDeltaType.NONE) + if (nextEntry != null) { - nextEntry = markAncestorIfNecessary(cacheEntry, nextEntry); + if (deltaType != KafkaDeltaType.NONE) + { + nextEntry = markAncestorIfNecessary(cacheEntry, nextEntry); + } + else if (nextEntry.convertedPosition() != NO_CONVERTED_POSITION) + { + nextEntry = nextConvertedEntry(cacheEntry, nextEntry); + } } if (nextEntry == null) @@ -289,6 +300,41 @@ private KafkaCacheEntryFW markAncestorIfNecessary( return nextEntry; } + private KafkaCacheEntryFW nextConvertedEntry( + KafkaCacheEntryFW cacheEntry, + KafkaCacheEntryFW nextEntry) + { + final int convertedAt = nextEntry.convertedPosition(); + assert convertedAt != 
NO_CONVERTED_POSITION; + + final KafkaCacheFile convertedFile = segment.convertedFile(); + final KafkaCachePaddedValueFW converted = convertedFile.readBytes(convertedAt, convertedRO::wrap); + final OctetsFW convertedValue = converted.value(); + final DirectBuffer entryBuffer = nextEntry.buffer(); + final KafkaKeyFW key = nextEntry.key(); + final int entryOffset = nextEntry.offset(); + final ArrayFW headers = nextEntry.headers(); + final ArrayFW trailers = nextEntry.trailers(); + + final int sizeofEntryHeader = key.limit() - nextEntry.offset(); + + int writeLimit = 0; + writeBuffer.putBytes(writeLimit, entryBuffer, entryOffset, sizeofEntryHeader); + writeLimit += sizeofEntryHeader; + writeBuffer.putInt(writeLimit, convertedValue.sizeof()); + writeLimit += Integer.BYTES; + writeBuffer.putBytes(writeLimit, convertedValue.buffer(), convertedValue.offset(), convertedValue.sizeof()); + writeLimit += convertedValue.sizeof(); + writeBuffer.putBytes(writeLimit, headers.buffer(), headers.offset(), headers.sizeof()); + writeLimit += headers.sizeof(); + writeBuffer.putBytes(writeLimit, trailers.buffer(), trailers.offset(), trailers.sizeof()); + writeLimit += trailers.sizeof(); + writeBuffer.putInt(writeLimit, 0); + writeLimit += Integer.BYTES; + + return cacheEntry.wrap(writeBuffer, 0, writeLimit); + } + public void advance( long offset) { diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheFile.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheFile.java index 6ab696d04c..b031aff963 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheFile.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheFile.java @@ -41,6 +41,7 @@ public class KafkaCacheFile implements AutoCloseable { private static final String EXT_LOG = ".log"; + private static final String EXT_CONVERTED = ".converted"; private static final String EXT_DELTA = ".delta"; private static final String EXT_INDEX = ".index"; private static final String EXT_HSCAN = ".hscan"; @@ -55,6 +56,7 @@ public class KafkaCacheFile implements AutoCloseable private static final String FORMAT_FILE = "%%019d%s"; private static final String FORMAT_LOG_FILE = String.format(FORMAT_FILE, EXT_LOG); + private static final String FORMAT_CONVERTED_FILE = String.format(FORMAT_FILE, EXT_CONVERTED); private static final String FORMAT_DELTA_FILE = String.format(FORMAT_FILE, EXT_DELTA); private static final String FORMAT_INDEX_FILE = String.format(FORMAT_FILE, EXT_INDEX); private static final String FORMAT_HSCAN_FILE = String.format(FORMAT_FILE, EXT_HSCAN); @@ -103,6 +105,11 @@ public KafkaCacheFile( this.maxCapacity = mappedBuf.capacity(); } + public DirectBuffer buffer() + { + return mappedBuf; + } + public Path location() { return location; @@ -147,11 +154,13 @@ public long readLong( return mappedBuf.getLong(position); } - public void writeBytes( + public int writeBytes( int position, Flyweight flyweight) { - writeBytes(position, flyweight.buffer(), flyweight.offset(), flyweight.sizeof()); + final int length = flyweight.sizeof(); + writeBytes(position, flyweight.buffer(), flyweight.offset(), length); + return length; } public void writeBytes( @@ -578,4 +587,23 @@ public Delta( super(location.resolve(String.format(FORMAT_DELTA_FILE, baseOffset))); } } + + public static final class Converted extends KafkaCacheFile + { + public Converted( + 
Path location, + long baseOffset, + int capacity, + MutableDirectBuffer appendBuf) + { + super(location.resolve(String.format(FORMAT_CONVERTED_FILE, baseOffset)), capacity, appendBuf); + } + + public Converted( + Path location, + long baseOffset) + { + super(location.resolve(String.format(FORMAT_CONVERTED_FILE, baseOffset))); + } + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartition.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartition.java index 8f7fbcef4f..9bcc7d9404 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartition.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartition.java @@ -21,11 +21,22 @@ import static io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheIndexRecord.SIZEOF_INDEX_RECORD; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType.JSON_PATCH; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_ACKNOWLEDGE; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_ACK_MODE; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_ANCESTOR; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_CONVERTED_POSITION; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_DELTA_POSITION; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_DESCENDANT; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_FLAGS; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_KEY; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_OFFSET; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_OWNER_ID; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_PRODUCER_EPOCH; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_PRODUCER_ID; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_SEQUENCE; +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW.FIELD_OFFSET_TIMESTAMP; import static java.nio.ByteBuffer.allocateDirect; import static java.util.Objects.requireNonNull; +import static org.agrona.BitUtil.SIZE_OF_INT; import java.io.IOException; import java.nio.ByteBuffer; @@ -52,7 +63,6 @@ import org.agrona.io.DirectBufferInputStream; import org.agrona.io.ExpandableDirectBufferOutputStream; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaTopicType; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.ArrayFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; @@ -62,9 +72,12 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaKeyFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaOffsetType; import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Varint32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheDeltaFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public final class KafkaCachePartition { @@ -72,12 +85,17 @@ public final class KafkaCachePartition private static final long NO_ANCESTOR_OFFSET = -1L; private static final long NO_DESCENDANT_OFFSET = -1L; private static final int NO_SEQUENCE = -1; + private static final short NO_PRODUCER_ID = -1; + private static final short NO_PRODUCER_EPOCH = -1; private static final int NO_ACKNOWLEDGE = 0; + private static final int NO_CONVERTED_POSITION = -1; private static final int NO_DELTA_POSITION = -1; private static final String FORMAT_FETCH_PARTITION_DIRECTORY = "%s-%d"; private static final String FORMAT_PRODUCE_PARTITION_DIRECTORY = "%s-%d-%d"; + private static final int FLAGS_COMPLETE = 0x03; + private static final int FLAGS_FIN = 0x01; public static final int CACHE_ENTRY_FLAGS_DIRTY = 0x01; public static final int CACHE_ENTRY_FLAGS_COMPLETED = 0x02; public static final int CACHE_ENTRY_FLAGS_ABORTED = 0x04; @@ -98,9 +116,10 @@ public final class KafkaCachePartition private final KafkaCacheEntryFW logEntryRO = new KafkaCacheEntryFW(); private final KafkaCacheDeltaFW deltaEntryRO = new KafkaCacheDeltaFW(); - private final MutableDirectBuffer entryInfo = new UnsafeBuffer(new byte[6 * Long.BYTES + 3 * Integer.BYTES + Short.BYTES]); + private final MutableDirectBuffer entryInfo = new UnsafeBuffer(new byte[FIELD_OFFSET_KEY]); private final MutableDirectBuffer valueInfo = new UnsafeBuffer(new byte[Integer.BYTES]); + private final Varint32FW.Builder varIntRW = new Varint32FW.Builder().wrap(new UnsafeBuffer(new byte[5]), 0, 5); private final Array32FW headersRO = new Array32FW(new KafkaHeaderFW()); private final DirectBufferInputStream ancestorIn = new DirectBufferInputStream(); @@ -314,7 +333,11 @@ public Node newHeadIfNecessary( } public void writeEntry( + EngineContext context, + long bindingId, long offset, + MutableInteger entryMark, + MutableInteger valueMark, long timestamp, long producerId, KafkaKeyFW key, @@ -323,17 +346,24 @@ public void writeEntry( KafkaCacheEntryFW ancestor, int entryFlags, KafkaDeltaType deltaType, - KafkaTopicType type) + ConverterHandler convertKey, + ConverterHandler convertValue, + boolean verbose) { final long keyHash = computeHash(key); final int valueLength = value != null ? 
value.sizeof() : -1; - writeEntryStart(offset, timestamp, producerId, key, keyHash, valueLength, ancestor, entryFlags, deltaType); - writeEntryContinue(value); - writeEntryFinish(headers, deltaType, type); + writeEntryStart(context, bindingId, offset, entryMark, valueMark, timestamp, producerId, key, + keyHash, valueLength, ancestor, entryFlags, deltaType, value, convertKey, convertValue, verbose); + writeEntryContinue(context, bindingId, FLAGS_COMPLETE, offset, entryMark, valueMark, value, convertValue, verbose); + writeEntryFinish(headers, deltaType); } public void writeEntryStart( + EngineContext context, + long bindingId, long offset, + MutableInteger entryMark, + MutableInteger valueMark, long timestamp, long producerId, KafkaKeyFW key, @@ -341,7 +371,11 @@ public void writeEntryStart( int valueLength, KafkaCacheEntryFW ancestor, int entryFlags, - KafkaDeltaType deltaType) + KafkaDeltaType deltaType, + OctetsFW payload, + ConverterHandler convertKey, + ConverterHandler convertValue, + boolean verbose) { assert offset > this.progress : String.format("%d > %d", offset, this.progress); this.progress = offset; @@ -357,6 +391,9 @@ public void writeEntryStart( final KafkaCacheFile hashFile = segment.hashFile(); final KafkaCacheFile keysFile = segment.keysFile(); final KafkaCacheFile nullsFile = segment.nullsFile(); + final KafkaCacheFile convertedFile = segment.convertedFile(); + + final int valueMaxLength = valueLength == -1 ? 0 : valueLength; logFile.mark(); @@ -370,21 +407,63 @@ public void writeEntryStart( assert deltaPosition == NO_DELTA_POSITION || ancestor != null; this.ancestorEntry = ancestor; - entryInfo.putLong(0, progress); - entryInfo.putLong(Long.BYTES, timestamp); - entryInfo.putLong(2 * Long.BYTES, producerId); - entryInfo.putLong(3 * Long.BYTES, NO_ACKNOWLEDGE); - entryInfo.putInt(4 * Long.BYTES, NO_SEQUENCE); - entryInfo.putLong(4 * Long.BYTES + Integer.BYTES, ancestorOffset); - entryInfo.putLong(5 * Long.BYTES + Integer.BYTES, NO_DESCENDANT_OFFSET); - entryInfo.putInt(6 * Long.BYTES + Integer.BYTES, entryFlags); - entryInfo.putInt(6 * Long.BYTES + 2 * Integer.BYTES, deltaPosition); - entryInfo.putShort(6 * Long.BYTES + 3 * Integer.BYTES, KafkaAckMode.NONE.value()); + int convertedPos = NO_CONVERTED_POSITION; + if (convertValue != ConverterHandler.NONE) + { + int convertedPadding = convertValue.padding(payload.buffer(), payload.offset(), payload.sizeof()); + int convertedMaxLength = valueMaxLength + convertedPadding; + + convertedPos = convertedFile.capacity(); + convertedFile.advance(convertedPos + convertedMaxLength + SIZE_OF_INT * 2); + + convertedFile.writeInt(convertedPos, 0); // length + convertedFile.writeInt(convertedPos + SIZE_OF_INT, convertedMaxLength); // padding + } + + entryMark.value = logFile.capacity(); + + entryInfo.putLong(FIELD_OFFSET_OFFSET, progress); + entryInfo.putLong(FIELD_OFFSET_TIMESTAMP, timestamp); + entryInfo.putLong(FIELD_OFFSET_OWNER_ID, producerId); + entryInfo.putLong(FIELD_OFFSET_ACKNOWLEDGE, NO_ACKNOWLEDGE); + entryInfo.putInt(FIELD_OFFSET_SEQUENCE, NO_SEQUENCE); + entryInfo.putLong(FIELD_OFFSET_ANCESTOR, ancestorOffset); + entryInfo.putLong(FIELD_OFFSET_DESCENDANT, NO_DESCENDANT_OFFSET); + entryInfo.putInt(FIELD_OFFSET_FLAGS, entryFlags); + entryInfo.putInt(FIELD_OFFSET_CONVERTED_POSITION, convertedPos); + entryInfo.putInt(FIELD_OFFSET_DELTA_POSITION, deltaPosition); + entryInfo.putShort(FIELD_OFFSET_ACK_MODE, KafkaAckMode.NONE.value()); logFile.appendBytes(entryInfo); - logFile.appendBytes(key); + if (key.value() == null) + { + 
logFile.appendBytes(key); + } + else + { + final ValueConsumer writeKey = (buffer, index, length) -> + { + Varint32FW newLength = varIntRW.set(length).build(); + logFile.appendBytes(newLength); + logFile.appendBytes(buffer, index, length); + }; + OctetsFW value = key.value(); + int converted = convertKey.convert(value.buffer(), value.offset(), value.sizeof(), writeKey); + if (converted == -1) + { + logFile.writeInt(entryMark.value + FIELD_OFFSET_FLAGS, CACHE_ENTRY_FLAGS_ABORTED); + if (verbose) + { + System.out.printf("%s:%s %s: Skipping invalid message on topic %s, partition %d, offset %d\n", + System.currentTimeMillis(), context.supplyNamespace(bindingId), + context.supplyLocalName(bindingId), topic, id, offset); + } + } + } logFile.appendInt(valueLength); + valueMark.value = logFile.capacity(); + final long hashEntry = keyHash << 32 | logFile.markValue(); hashFile.appendLong(hashEntry); @@ -401,7 +480,15 @@ public void writeEntryStart( } public void writeEntryContinue( - OctetsFW payload) + EngineContext context, + long bindingId, + int flags, + long offset, + MutableInteger entryMark, + MutableInteger valueMark, + OctetsFW payload, + ConverterHandler convertValue, + boolean verbose) { final Node head = sentinel.previous; assert head != sentinel; @@ -410,18 +497,53 @@ public void writeEntryContinue( assert headSegment != null; final KafkaCacheFile logFile = headSegment.logFile(); + final KafkaCacheFile convertedFile = headSegment.convertedFile(); final int logAvailable = logFile.available(); final int logRequired = payload.sizeof(); assert logAvailable >= logRequired; logFile.appendBytes(payload.buffer(), payload.offset(), payload.sizeof()); + + if (payload != null && convertValue != ConverterHandler.NONE) + { + final ValueConsumer consumeConverted = (buffer, index, length) -> + { + final int convertedLengthAt = logFile.readInt(entryMark.value + FIELD_OFFSET_CONVERTED_POSITION); + final int convertedLength = convertedFile.readInt(convertedLengthAt); + final int convertedValueLimit = convertedLengthAt + SIZE_OF_INT + convertedLength; + final int convertedPadding = convertedFile.readInt(convertedValueLimit); + + assert convertedPadding - length >= 0; + + convertedFile.writeInt(convertedLengthAt, convertedLength + length); + convertedFile.writeBytes(convertedValueLimit, buffer, index, length); + convertedFile.writeInt(convertedValueLimit + length, convertedPadding - length); + }; + + final int valueLength = logFile.capacity() - valueMark.value; + int entryFlags = logFile.readInt(entryMark.value + FIELD_OFFSET_FLAGS); + + if ((flags & FLAGS_FIN) != 0x00 && (entryFlags & CACHE_ENTRY_FLAGS_ABORTED) == 0x00) + { + int converted = convertValue.convert(logFile.buffer(), valueMark.value, valueLength, consumeConverted); + if (converted == -1) + { + logFile.writeInt(entryMark.value + FIELD_OFFSET_FLAGS, CACHE_ENTRY_FLAGS_ABORTED); + if (verbose) + { + System.out.printf("%s:%s %s: Skipping invalid message on topic %s, partition %d, offset %d\n", + System.currentTimeMillis(), context.supplyNamespace(bindingId), + context.supplyLocalName(bindingId), topic, id, offset); + } + } + } + } } public void writeEntryFinish( ArrayFW headers, - KafkaDeltaType deltaType, - KafkaTopicType type) + KafkaDeltaType deltaType) { final Node head = sentinel.previous; assert head != sentinel; @@ -497,45 +619,29 @@ public void writeEntryFinish( deltaFile.appendBytes(diffBuffer, 0, Integer.BYTES + deltaLength); } - if (type != null) - { - if (type.key != null) - { - OctetsFW key = headEntry.key() != null ? 
headEntry.key().value() : null; - if (key != null && - !type.key.read(key.value(), key.offset(), key.sizeof())) - { - // Placeholder to log Invalid events - } - } - - if (type.value != null) - { - OctetsFW value = headEntry.value(); - if (value != null && - !type.value.read(value.value(), value.offset(), value.sizeof())) - { - // Placeholder to log Invalid events - } - } - } headSegment.lastOffset(progress); } - public void writeProduceEntryStart( + public int writeProduceEntryStart( long offset, Node head, MutableInteger entryMark, - MutableInteger position, + MutableInteger valueMark, + MutableInteger valueLimit, long timestamp, long ownerId, + long producerId, + short producerEpoch, int sequence, KafkaAckMode ackMode, KafkaKeyFW key, long keyHash, int valueLength, ArrayFW headers, - int trailersSizeMax) + int trailersSizeMax, + OctetsFW payload, + ConverterHandler convertKey, + ConverterHandler convertValue) { assert offset > this.progress : String.format("%d > %d", offset, this.progress); this.progress = offset; @@ -545,65 +651,140 @@ public void writeProduceEntryStart( final KafkaCacheFile indexFile = segment.indexFile(); final KafkaCacheFile logFile = segment.logFile(); + final KafkaCacheFile convertedFile = segment.convertedFile(); + + final int valueMaxLength = valueLength == -1 ? 0 : valueLength; + + int convertedPos = NO_CONVERTED_POSITION; + if (convertValue != ConverterHandler.NONE) + { + int convertedPadding = convertValue.padding(payload.buffer(), payload.offset(), payload.sizeof()); + int convertedMaxLength = valueMaxLength + convertedPadding; + + convertedPos = convertedFile.capacity(); + convertedFile.advance(convertedPos + convertedMaxLength + SIZE_OF_INT * 2); + + convertedFile.writeInt(convertedPos, 0); // length + convertedFile.writeInt(convertedPos + SIZE_OF_INT, convertedMaxLength); // padding + } entryMark.value = logFile.capacity(); - entryInfo.putLong(0, progress); - entryInfo.putLong(Long.BYTES, timestamp); - entryInfo.putLong(2 * Long.BYTES, ownerId); - entryInfo.putLong(3 * Long.BYTES, NO_ACKNOWLEDGE); - entryInfo.putInt(4 * Long.BYTES, sequence); - entryInfo.putLong(4 * Long.BYTES + Integer.BYTES, NO_ANCESTOR_OFFSET); - entryInfo.putLong(5 * Long.BYTES + Integer.BYTES, NO_DESCENDANT_OFFSET); - entryInfo.putInt(6 * Long.BYTES + Integer.BYTES, 0x00); - entryInfo.putInt(6 * Long.BYTES + 2 * Integer.BYTES, NO_DELTA_POSITION); - entryInfo.putShort(6 * Long.BYTES + 3 * Integer.BYTES, ackMode.value()); + entryInfo.putLong(FIELD_OFFSET_OFFSET, progress); + entryInfo.putLong(FIELD_OFFSET_TIMESTAMP, timestamp); + entryInfo.putLong(FIELD_OFFSET_OWNER_ID, ownerId); + entryInfo.putLong(FIELD_OFFSET_ACKNOWLEDGE, NO_ACKNOWLEDGE); + entryInfo.putLong(FIELD_OFFSET_PRODUCER_ID, producerId); + entryInfo.putShort(FIELD_OFFSET_PRODUCER_EPOCH, producerEpoch); + entryInfo.putInt(FIELD_OFFSET_SEQUENCE, sequence); + entryInfo.putLong(FIELD_OFFSET_ANCESTOR, NO_ANCESTOR_OFFSET); + entryInfo.putLong(FIELD_OFFSET_DESCENDANT, NO_DESCENDANT_OFFSET); + entryInfo.putInt(FIELD_OFFSET_FLAGS, 0x00); + entryInfo.putInt(FIELD_OFFSET_CONVERTED_POSITION, convertedPos); + entryInfo.putInt(FIELD_OFFSET_DELTA_POSITION, NO_DELTA_POSITION); + entryInfo.putShort(FIELD_OFFSET_ACK_MODE, ackMode.value()); logFile.appendBytes(entryInfo); - logFile.appendBytes(key); - logFile.appendInt(valueLength); - - position.value = logFile.capacity(); - final int valueMaxLength = valueLength == -1 ? 
0 : valueLength; - final int logAvailable = logFile.available() - valueMaxLength; - final int logRequired = headers.sizeof(); - assert logAvailable >= logRequired : String.format("%s %d >= %d", segment, logAvailable, logRequired); - logFile.advance(position.value + valueMaxLength); - logFile.appendBytes(headers); + int converted = 0; + write: + { + OctetsFW value = key.value(); + if (value == null) + { + logFile.appendBytes(key); + } + else + { + final ValueConsumer writeKey = (buffer, index, length) -> + { + Varint32FW newLength = varIntRW.set(length).build(); + logFile.appendBytes(newLength); + logFile.appendBytes(buffer, index, length); + }; - final int trailersAt = logFile.capacity(); - logFile.advance(logFile.capacity() + trailersSizeMax + SIZEOF_PADDING_LENGTH); - logFile.writeBytes(trailersAt, EMPTY_TRAILERS); // needed for incomplete tryWrap - logFile.writeInt(trailersAt + SIZEOF_EMPTY_TRAILERS, trailersSizeMax - SIZEOF_EMPTY_TRAILERS); + converted = convertKey.convert(value.buffer(), value.offset(), value.sizeof(), writeKey); - final long offsetDelta = (int)(progress - segment.baseOffset()); - final long indexEntry = (offsetDelta << 32) | entryMark.value; - assert indexFile.available() >= Long.BYTES; - indexFile.appendLong(indexEntry); + if (converted == -1) + { + break write; + } + } + logFile.appendInt(valueLength); + + valueMark.value = logFile.capacity(); + valueLimit.value = valueMark.value; + + final int logAvailable = logFile.available() - valueMaxLength; + final int logRequired = headers.sizeof(); + assert logAvailable >= logRequired : String.format("%s %d >= %d", segment, logAvailable, logRequired); + logFile.advance(valueMark.value + valueMaxLength); + logFile.appendBytes(headers); + + final int trailersAt = logFile.capacity(); + logFile.advance(logFile.capacity() + trailersSizeMax + SIZEOF_PADDING_LENGTH); + logFile.writeBytes(trailersAt, EMPTY_TRAILERS); // needed for incomplete tryWrap + logFile.writeInt(trailersAt + SIZEOF_EMPTY_TRAILERS, trailersSizeMax - SIZEOF_EMPTY_TRAILERS); + + final long offsetDelta = (int)(progress - segment.baseOffset()); + final long indexEntry = (offsetDelta << 32) | entryMark.value; + assert indexFile.available() >= Long.BYTES; + indexFile.appendLong(indexEntry); + } + return converted; } - public void writeProduceEntryContinue( + public int writeProduceEntryContinue( + int flags, Node head, - MutableInteger position, - OctetsFW payload) + MutableInteger entryMark, + MutableInteger valueMark, + MutableInteger valueLimit, + OctetsFW payload, + ConverterHandler convertValue) { final KafkaCacheSegment segment = head.segment; assert segment != null; final KafkaCacheFile logFile = segment.logFile(); + final KafkaCacheFile convertedFile = segment.convertedFile(); + + int converted = 0; + if (payload != null) + { + valueLimit.value += logFile.writeBytes(valueLimit.value, payload); + + if (convertValue != ConverterHandler.NONE) + { + final ValueConsumer consumeConverted = (buffer, index, length) -> + { + final int convertedLengthAt = logFile.readInt(entryMark.value + FIELD_OFFSET_CONVERTED_POSITION); + final int convertedLength = convertedFile.readInt(convertedLengthAt); + final int convertedValueLimit = convertedLengthAt + SIZE_OF_INT + convertedLength; + final int convertedPadding = convertedFile.readInt(convertedValueLimit); - final int payloadLength = payload.sizeof(); + assert convertedPadding - length >= 0; - logFile.writeBytes(position.value, payload); + convertedFile.writeInt(convertedLengthAt, convertedLength + length); + 
convertedFile.writeBytes(convertedValueLimit, buffer, index, length); + convertedFile.writeInt(convertedValueLimit + length, convertedPadding - length); + }; + + final int valueLength = valueLimit.value - valueMark.value; + if ((flags & FLAGS_FIN) != 0x00) + { + converted = convertValue.convert(logFile.buffer(), valueMark.value, valueLength, consumeConverted); + } + } + } - position.value += payloadLength; + return converted; } public void writeProduceEntryFin( Node head, MutableInteger entryMark, - MutableInteger position, + MutableInteger valueLimit, long acknowledge, Array32FW trailers) { @@ -612,68 +793,25 @@ public void writeProduceEntryFin( final KafkaCacheFile logFile = segment.logFile(); - final Array32FW headers = logFile.readBytes(position.value, headersRO::wrap); - position.value += headers.sizeof(); + final Array32FW headers = logFile.readBytes(valueLimit.value, headersRO::wrap); + valueLimit.value += headers.sizeof(); - final int trailersAt = position.value; + final int trailersAt = valueLimit.value; final int trailersSizeMax = SIZEOF_EMPTY_TRAILERS + logFile.readInt(trailersAt + SIZEOF_EMPTY_TRAILERS); if (!trailers.isEmpty()) { - logFile.writeBytes(position.value, trailers); - position.value += trailers.sizeof(); - logFile.writeInt(position.value, trailersSizeMax - trailers.sizeof()); + logFile.writeBytes(valueLimit.value, trailers); + valueLimit.value += trailers.sizeof(); + logFile.writeInt(valueLimit.value, trailersSizeMax - trailers.sizeof()); } - position.value = trailersAt + trailersSizeMax; + valueLimit.value = trailersAt + trailersSizeMax; logFile.writeLong(entryMark.value + FIELD_OFFSET_ACKNOWLEDGE, acknowledge); logFile.writeInt(entryMark.value + FIELD_OFFSET_FLAGS, CACHE_ENTRY_FLAGS_COMPLETED); } - public boolean validProduceEntry( - KafkaTopicType type, - boolean isKey, - OctetsFW data) - { - boolean status = true; - - Validator validator = isKey ? type.key : type.value; - if (data != null && - validator != null && - !validator.write(data.value(), data.offset(), data.sizeof())) - { - status = false; - } - - return status; - } - - public boolean validProduceEntry( - KafkaTopicType type, - boolean isKey, - Node head) - { - final KafkaCacheSegment segment = head.segment; - assert segment != null; - - final KafkaCacheFile logFile = segment.logFile(); - - final KafkaCacheEntryFW headEntry = logFile.readBytes(logFile.markValue(), headEntryRO::wrap); - boolean status = true; - - OctetsFW value = headEntry.value(); - Validator validator = isKey ? 
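// (Removed here: the standalone write-side validators. Validation is now
// folded into conversion: ConverterHandler.convert() returns -1 when the
// payload does not match the configured model, and the produce path maps
// that onto ERROR_INVALID_RECORD instead of consulting a boolean Validator.)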
type.key : type.value; - if (value != null && - validator != null && - !validator.write(value.value(), value.offset(), value.sizeof())) - { - status = false; - } - - return status; - } - public long retainAt( KafkaCacheSegment segment) { diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheSegment.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheSegment.java index d4c23322dd..b435dd54ee 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheSegment.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCacheSegment.java @@ -35,6 +35,7 @@ public final class KafkaCacheSegment extends KafkaCacheObject private long timestamp; private final KafkaCacheFile logFile; + private final KafkaCacheFile convertedFile; private final KafkaCacheFile deltaFile; private final KafkaCacheIndexFile indexFile; private final KafkaCacheIndexFile hashFile; @@ -78,6 +79,7 @@ public KafkaCacheSegment( this.lastOffset = OFFSET_LIVE; this.timestamp = currentTimeMillis(); this.logFile = new KafkaCacheFile.Log(location, baseOffset, config.segmentBytes, appendBuf); + this.convertedFile = new KafkaCacheFile.Converted(location, baseOffset, config.segmentBytes, appendBuf); this.deltaFile = new KafkaCacheFile.Delta(location, baseOffset, config.segmentBytes, appendBuf); this.indexFile = new KafkaCacheFile.Index(location, baseOffset, config.segmentIndexBytes, appendBuf); this.hashFile = new KafkaCacheFile.HashScan(location, baseOffset, config.segmentIndexBytes, appendBuf, sortSpaceRef); @@ -99,6 +101,7 @@ public KafkaCacheSegment( this.lastOffset = lastOffset; this.timestamp = currentTimeMillis(); this.logFile = new KafkaCacheFile.Log(location, baseOffset); + this.convertedFile = new KafkaCacheFile.Converted(location, baseOffset); this.deltaFile = new KafkaCacheFile.Delta(location, baseOffset); this.indexFile = new KafkaCacheFile.Index(location, baseOffset); this.hashFile = new KafkaCacheFile.HashIndex(location, baseOffset); @@ -153,6 +156,11 @@ public KafkaCacheFile logFile() return logFile; } + public KafkaCacheFile convertedFile() + { + return convertedFile; + } + public KafkaCacheFile deltaFile() { return deltaFile; @@ -181,6 +189,7 @@ public KafkaCacheIndexFile keysFile() public KafkaCacheSegment freeze() { logFile.freeze(); + convertedFile.freeze(); deltaFile.freeze(); indexFile.freeze(); hashFile.freeze(); @@ -202,6 +211,7 @@ public void delete() indexFile.delete(); hashFile.delete(); nullsFile.delete(); + convertedFile.delete(); deltaFile.delete(); keysFile.delete(); } @@ -258,6 +268,7 @@ protected void onClosed() indexFile.close(); hashFile.close(); nullsFile.close(); + convertedFile.close(); deltaFile.close(); keysFile.close(); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java index 93f6b75181..d590fead82 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java @@ -32,6 +32,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import 
io.aklivity.zilla.runtime.engine.config.KindConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; public final class KafkaBindingConfig { @@ -41,7 +42,10 @@ public final class KafkaBindingConfig public final KindConfig kind; public final List routes; public final ToLongFunction resolveId; - public final Map topics; + public final Map keyReaders; + public final Map keyWriters; + public final Map valueReaders; + public final Map valueWriters; public KafkaBindingConfig( BindingConfig binding, @@ -53,13 +57,38 @@ public KafkaBindingConfig( this.options = KafkaOptionsConfig.class.cast(binding.options); this.routes = binding.routes.stream().map(KafkaRouteConfig::new).collect(toList()); this.resolveId = binding.resolveId; - this.topics = options != null && - options.topics != null - ? options.topics.stream() - .collect(Collectors.toMap(t -> t.name, t -> new KafkaTopicType( - t.key != null ? context.createValidator(t.key, resolveId) : null, - t.value != null ? context.createValidator(t.value, resolveId) : null - ))) : null; + this.keyReaders = options != null && options.topics != null + ? options.topics.stream() + .collect(Collectors.toMap( + t -> t.name, + t -> t.key != null + ? context.supplyReadConverter(t.key) + : ConverterHandler.NONE)) + : null; + this.keyWriters = options != null && options.topics != null + ? options.topics.stream() + .collect(Collectors.toMap( + t -> t.name, + t -> t.key != null + ? context.supplyWriteConverter(t.key) + : ConverterHandler.NONE)) + : null; + this.valueReaders = options != null && options.topics != null + ? options.topics.stream() + .collect(Collectors.toMap( + t -> t.name, + t -> t.value != null + ? context.supplyReadConverter(t.value) + : ConverterHandler.NONE)) + : null; + this.valueWriters = options != null && options.topics != null + ? options.topics.stream() + .collect(Collectors.toMap( + t -> t.name, + t -> t.value != null + ? context.supplyWriteConverter(t.value) + : ConverterHandler.NONE)) + : null; } public KafkaRouteConfig resolve( @@ -117,4 +146,28 @@ public KafkaOffsetType supplyDefaultOffset( KafkaTopicConfig config = topic(topic); return config != null && config.defaultOffset != null ? config.defaultOffset : HISTORICAL; } + + public ConverterHandler resolveKeyReader( + String topic) + { + return keyReaders != null ? keyReaders.getOrDefault(topic, ConverterHandler.NONE) : ConverterHandler.NONE; + } + + public ConverterHandler resolveKeyWriter( + String topic) + { + return keyWriters != null ? keyWriters.getOrDefault(topic, ConverterHandler.NONE) : ConverterHandler.NONE; + } + + public ConverterHandler resolveValueReader( + String topic) + { + return valueReaders != null ? valueReaders.getOrDefault(topic, ConverterHandler.NONE) : ConverterHandler.NONE; + } + + public ConverterHandler resolveValueWriter( + String topic) + { + return valueWriters != null ? 
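// The four converter maps above are built with near-identical stream
// pipelines; a sketch of a helper that could fold them into one (names and
// signature hypothetical, not part of this change):
//
//   private static Map<String, ConverterHandler> converters(
//       KafkaOptionsConfig options,
//       Function<KafkaTopicConfig, ModelConfig> model,
//       Function<ModelConfig, ConverterHandler> supply)
//   {
//       return options != null && options.topics != null
//           ? options.topics.stream().collect(Collectors.toMap(
//               t -> t.name,
//               t -> model.apply(t) != null ? supply.apply(model.apply(t)) : ConverterHandler.NONE))
//           : null;
//   }
//
// e.g. keyReaders = converters(options, t -> t.key, context::supplyReadConverter).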
valueWriters.getOrDefault(topic, ConverterHandler.NONE) : ConverterHandler.NONE; + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java index ea7fcf79b6..2fb99e5ca0 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java @@ -23,8 +23,8 @@ import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaOffsetType; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapter; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapter; public final class KafkaTopicConfigAdapter implements JsonbAdapter { @@ -35,7 +35,7 @@ public final class KafkaTopicConfigAdapter implements JsonbAdapter(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()) .wrap(new UnsafeBuffer(new byte[8]), 0, 8) .build(); + private static final long PRODUCE_FLUSH_PRODUCER_ID = -1; + private static final short PRODUCE_FLUSH_PRODUCER_EPOCH = -1; private static final int PRODUCE_FLUSH_SEQUENCE = -1; private static final int ERROR_CORRUPT_MESSAGE = 2; @@ -198,7 +200,7 @@ public KafkaCacheClientProduceFactory( this.initialBudgetMax = bufferPool.slotCapacity(); this.localIndex = context.index(); this.cleanupDelay = config.cacheClientCleanupDelay(); - this.cursorFactory = new KafkaCacheCursorFactory(context.writeBuffer()); + this.cursorFactory = new KafkaCacheCursorFactory(context.writeBuffer().capacity()); this.trailersSizeMax = config.cacheClientTrailersSizeMax(); this.reconnectDelay = config.cacheServerReconnect(); } @@ -258,10 +260,11 @@ public MessageConsumer newStream( final KafkaCache cache = supplyCache.apply(cacheName); final KafkaCacheTopic topic = cache.supplyTopic(topicName); final KafkaCachePartition partition = topic.supplyProducePartition(partitionId, localIndex); - final KafkaTopicType type = binding.topics != null ? 
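// The resolve* accessors replacing this topic-type lookup are total: with no
// topics configured the maps are null and the guard answers
// ConverterHandler.NONE; with topics configured but this topic absent,
// getOrDefault answers NONE. Either way the produce fan never sees a null
// converter, so only topics with a configured model pay conversion cost.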
binding.topics.get(topicName) : null; + final ConverterHandler convertKey = binding.resolveKeyWriter(topicName); + final ConverterHandler convertValue = binding.resolveValueWriter(topicName); final KafkaCacheClientProduceFan newFan = new KafkaCacheClientProduceFan(routedId, resolvedId, authorization, budget, - partition, cacheRoute, topicName, type); + partition, cacheRoute, topicName, convertKey, convertValue); cacheRoute.clientProduceFansByTopicPartition.put(partitionKey, newFan); fan = newFan; @@ -496,6 +499,8 @@ final class KafkaCacheClientProduceFan private final long routedId; private final long authorization; private final int partitionId; + private final ConverterHandler convertKey; + private final ConverterHandler convertValue; private long initialId; private long replyId; @@ -503,7 +508,6 @@ final class KafkaCacheClientProduceFan private KafkaCacheClientBudget budget; private KafkaCacheRoute cacheRoute; private String topicName; - private KafkaTopicType type; private int state; @@ -533,7 +537,8 @@ private KafkaCacheClientProduceFan( KafkaCachePartition partition, KafkaCacheRoute cacheRoute, String topicName, - KafkaTopicType type) + ConverterHandler convertKey, + ConverterHandler convertValue) { this.originId = originId; this.routedId = routedId; @@ -543,7 +548,8 @@ private KafkaCacheClientProduceFan( this.budget = budget; this.cacheRoute = cacheRoute; this.topicName = topicName; - this.type = type; + this.convertKey = convertKey; + this.convertValue = convertValue; this.members = new Long2ObjectHashMap<>(); this.defaultOffset = KafkaOffsetType.LIVE; this.cursor = cursorFactory.newCursor( @@ -679,6 +685,8 @@ private void onClientInitialData( assert kafkaDataEx.kind() == KafkaDataExFW.KIND_PRODUCE; KafkaProduceDataExFW kafkaProduceDataExFW = kafkaDataEx.produce(); final int deferred = kafkaProduceDataExFW.deferred(); + final long producerId = kafkaProduceDataExFW.producerId(); + final short producerEpoch = kafkaProduceDataExFW.producerEpoch(); final int sequence = kafkaProduceDataExFW.sequence(); final Array32FW headers = kafkaProduceDataExFW.headers(); final int headersSizeMax = headers.sizeof() + trailersSizeMax; @@ -688,25 +696,12 @@ private void onClientInitialData( final int valueLength = valueFragment != null ? 
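// The produce DATA extension now also carries the idempotent-producer
// identity (producerId / producerEpoch, decoded above), which
// writeProduceEntryStart persists at FIELD_OFFSET_PRODUCER_ID and
// FIELD_OFFSET_PRODUCER_EPOCH; how that identity is used to de-duplicate
// replayed sequences is outside this hunk.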
valueFragment.sizeof() + deferred : -1; final int maxValueLength = valueLength + headersSizeMax; - if ((flags & FLAGS_FIN) == 0x00 && deferred == 0) - { - error = ERROR_CORRUPT_MESSAGE; - break init; - } - if (maxValueLength > partition.segmentBytes()) { error = ERROR_RECORD_LIST_TOO_LARGE; break init; } - if (type != null && - !partition.validProduceEntry(type, true, key.value())) - { - error = ERROR_INVALID_RECORD; - break init; - } - stream.segment = partition.newHeadIfNecessary(partitionOffset, key, valueLength, headersSizeMax); if (stream.segment != null) @@ -716,8 +711,13 @@ private void onClientInitialData( : String.format("%d >= 0 && %d >= %d", partitionOffset, partitionOffset, nextOffset); final long keyHash = partition.computeKeyHash(key); - partition.writeProduceEntryStart(partitionOffset, stream.segment, stream.entryMark, stream.position, - timestamp, stream.initialId, sequence, ackMode, key, keyHash, valueLength, headers, trailersSizeMax); + if (partition.writeProduceEntryStart(partitionOffset, stream.segment, stream.entryMark, stream.valueMark, + stream.valueLimit, timestamp, stream.initialId, producerId, producerEpoch, sequence, ackMode, key, + keyHash, valueLength, headers, trailersSizeMax, valueFragment, convertKey, convertValue) == -1) + { + error = ERROR_INVALID_RECORD; + break init; + } stream.partitionOffset = partitionOffset; partitionOffset++; } @@ -729,14 +729,12 @@ private void onClientInitialData( if (valueFragment != null && error == NO_ERROR) { - partition.writeProduceEntryContinue(stream.segment, stream.position, valueFragment); - } - - if ((flags & FLAGS_FIN) != 0x00 && - type != null && - !partition.validProduceEntry(type, false, stream.segment)) - { - error = ERROR_INVALID_RECORD; + if (partition.writeProduceEntryContinue(flags, stream.segment, + stream.entryMark, stream.valueMark, stream.valueLimit, + valueFragment, convertValue) == -1) + { + error = ERROR_INVALID_RECORD; + } } if ((flags & FLAGS_FIN) != 0x00 && error == NO_ERROR) @@ -757,7 +755,7 @@ private void onClientInitialData( } } - partition.writeProduceEntryFin(stream.segment, stream.entryMark, stream.position, stream.initialSeq, trailers); + partition.writeProduceEntryFin(stream.segment, stream.entryMark, stream.valueLimit, stream.initialSeq, trailers); flushClientFanInitialIfNecessary(traceId); } @@ -792,15 +790,16 @@ private void onClientInitialFlush( : String.format("%d >= 0 && %d >= %d", partitionOffset, partitionOffset, nextOffset); final long keyHash = partition.computeKeyHash(EMPTY_KEY); - partition.writeProduceEntryStart(partitionOffset, stream.segment, stream.entryMark, stream.position, - now().toEpochMilli(), stream.initialId, PRODUCE_FLUSH_SEQUENCE, - KafkaAckMode.LEADER_ONLY, EMPTY_KEY, keyHash, 0, EMPTY_TRAILERS, trailersSizeMax); + partition.writeProduceEntryStart(partitionOffset, stream.segment, stream.entryMark, stream.valueMark, + stream.valueLimit, now().toEpochMilli(), stream.initialId, PRODUCE_FLUSH_PRODUCER_ID, + PRODUCE_FLUSH_PRODUCER_EPOCH, PRODUCE_FLUSH_SEQUENCE, KafkaAckMode.LEADER_ONLY, EMPTY_KEY, keyHash, + 0, EMPTY_TRAILERS, trailersSizeMax, EMPTY_OCTETS, convertKey, convertValue); stream.partitionOffset = partitionOffset; partitionOffset++; Array32FW trailers = EMPTY_TRAILERS; - partition.writeProduceEntryFin(stream.segment, stream.entryMark, stream.position, stream.initialSeq, trailers); + partition.writeProduceEntryFin(stream.segment, stream.entryMark, stream.valueLimit, stream.initialSeq, trailers); flushClientFanInitialIfNecessary(traceId); } else @@ -1207,7 
+1206,8 @@ private final class KafkaCacheClientProduceStream { private final KafkaCacheCursor cursor; private final MutableInteger entryMark; - private final MutableInteger position; + private final MutableInteger valueLimit; + private final MutableInteger valueMark; private final KafkaCacheClientProduceFan fan; private final MessageConsumer sender; private final long originId; @@ -1247,7 +1247,8 @@ private final class KafkaCacheClientProduceStream .asCondition(EMPTY_FILTER, KafkaEvaluation.LAZY), KafkaDeltaType.NONE); this.entryMark = new MutableInteger(0); - this.position = new MutableInteger(0); + this.valueMark = new MutableInteger(0); + this.valueLimit = new MutableInteger(0); this.fan = fan; this.sender = sender; this.originId = originId; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java index 62f21227da..86ef3ce680 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -46,9 +46,10 @@ public final class KafkaCacheGroupFactory implements BindingHandler { + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); private static final Consumer EMPTY_EXTENSION = ex -> {}; - private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); private final EndFW endRO = new EndFW(); @@ -66,7 +67,6 @@ public final class KafkaCacheGroupFactory implements BindingHandler private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); - private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final int kafkaTypeId; private final MutableDirectBuffer writeBuffer; @@ -176,35 +176,6 @@ private MessageConsumer newStream( return receiver; } - private void doBegin( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long affinity, - Consumer extension) - { - final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .affinity(affinity) - .extension(extension) - .build(); - - receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); - } - private void doBegin( MessageConsumer receiver, long originId, @@ -392,7 +363,8 @@ private void doReset( long acknowledge, int maximum, long traceId, - long authorization) + long authorization, + Flyweight extension) { final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -403,6 +375,7 @@ private void doReset( .maximum(maximum) .traceId(traceId) .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); @@ -526,6 +499,7 @@ private void onGroupInitialReset( final long 
sequence = reset.sequence(); final long acknowledge = reset.acknowledge(); final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); assert acknowledge <= sequence; assert acknowledge >= delegate.initialAck; @@ -535,7 +509,7 @@ private void onGroupInitialReset( assert delegate.initialAck <= delegate.initialSeq; - delegate.doGroupInitialReset(traceId); + delegate.doGroupInitialReset(traceId, extension); } @@ -549,7 +523,6 @@ private void onGroupInitialWindow( final long traceId = window.traceId(); final long budgetId = window.budgetId(); final int padding = window.padding(); - final int capabilities = window.capabilities(); assert acknowledge <= sequence; assert acknowledge >= delegate.initialAck; @@ -700,7 +673,7 @@ private void doGroupReplyReset( if (!KafkaState.replyClosed(state)) { doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization); + traceId, authorization, EMPTY_OCTETS); state = KafkaState.closedReply(state); } @@ -725,8 +698,6 @@ private final class KafkaCacheGroupApp private final KafkaCacheGroupNet group; private final MessageConsumer sender; private final String groupId; - private final String protocol; - private final int timeout; private final long originId; private final long routedId; private final long initialId; @@ -770,8 +741,6 @@ private final class KafkaCacheGroupApp this.affinity = affinity; this.authorization = authorization; this.groupId = groupId; - this.protocol = protocol; - this.timeout = timeout; } private void onGroupMessage( @@ -821,8 +790,6 @@ private void onGroupInitialBegin( final long sequence = begin.sequence(); final long acknowledge = begin.acknowledge(); final long traceId = begin.traceId(); - final long authorization = begin.authorization(); - final long affinity = begin.affinity(); final OctetsFW extension = begin.extension(); assert acknowledge <= sequence; @@ -916,14 +883,15 @@ private void onGroupInitialAbort( } private void doGroupInitialReset( - long traceId) + long traceId, + Flyweight extension) { if (!KafkaState.initialClosed(state)) { state = KafkaState.closedInitial(state); doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization); + traceId, authorization, extension); } } @@ -1049,7 +1017,7 @@ private void onGroupReplyWindow( private void cleanup( long traceId) { - doGroupInitialReset(traceId); + doGroupInitialReset(traceId, EMPTY_OCTETS); doGroupReplyAbort(traceId); group.doGroupInitialAbort(traceId); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheInitProducerIdFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheInitProducerIdFactory.java new file mode 100644 index 0000000000..97ff20b020 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheInitProducerIdFactory.java @@ -0,0 +1,917 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + +public final class KafkaCacheInitProducerIdFactory implements BindingHandler +{ + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + + public KafkaCacheInitProducerIdFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = 
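// Stream-id convention relied on by the assert in newStream() below:
// client-initiated stream ids carry the low bit set, and supplyReplyId pairs
// each initial id with its reply direction. A sketch of the assumed contract
// (inferred from the mask used below, not stated in this hunk):
//
//   assert (initialId & 1L) != 0L;                        // initiating stream
//   long replyId = supplyReplyId.applyAsLong(initialId);  // paired reply stream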
context.streamFactory();
+        this.supplyInitialId = context::supplyInitialId;
+        this.supplyReplyId = context::supplyReplyId;
+        this.supplyBinding = supplyBinding;
+    }
+
+    @Override
+    public MessageConsumer newStream(
+        int msgTypeId,
+        DirectBuffer buffer,
+        int index,
+        int length,
+        MessageConsumer sender)
+    {
+        final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+        final long originId = begin.originId();
+        final long routedId = begin.routedId();
+        final long initialId = begin.streamId();
+        final long authorization = begin.authorization();
+        final long affinity = begin.affinity();
+
+        assert (initialId & 0x0000_0000_0000_0001L) != 0L;
+
+        final OctetsFW extension = begin.extension();
+        final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
+        assert beginEx != null && beginEx.typeId() == kafkaTypeId;
+        final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap);
+        assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_INIT_PRODUCER_ID;
+
+        MessageConsumer newStream = null;
+
+        final KafkaBindingConfig binding = supplyBinding.apply(routedId);
+        final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, null, null) : null;
+
+        if (resolved != null)
+        {
+            final long resolvedId = resolved.id;
+
+            newStream = new KafkaCacheInitProducerIdApp(
+                sender,
+                originId,
+                routedId,
+                initialId,
+                affinity,
+                authorization,
+                resolvedId)::onInitProducerIdMessage;
+        }
+
+        return newStream;
+    }
+
+    private MessageConsumer newStream(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long affinity,
+        OctetsFW extension)
+    {
+        final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .affinity(affinity)
+            .extension(extension)
+            .build();
+
+        final MessageConsumer receiver =
+            streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
+
+        receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+
+        return receiver;
+    }
+
+    private void doBegin(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long affinity,
+        Flyweight extension)
+    {
+        final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .affinity(affinity)
+            .extension(extension.buffer(), extension.offset(), extension.sizeof())
+            .build();
+
+        receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+    }
+
+    private void doData(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int flags,
+        int reserved,
+        OctetsFW payload,
+        Flyweight extension)
+    {
+        final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .flags(flags)
+            .budgetId(budgetId)
+            .reserved(reserved)
+            .payload(payload)
+            .extension(extension.buffer(), extension.offset(), extension.sizeof())
+            .build();
+
+        receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof());
+    }
+
+    private void doEnd(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Consumer extension)
+    {
+        final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension)
+            .build();
+
+        receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof());
+    }
+
+    private void doAbort(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Consumer extension)
+    {
+        final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension)
+            .build();
+
+        receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof());
+    }
+
+    private void doWindow(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int padding)
+    {
+        final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .budgetId(budgetId)
+            .padding(padding)
+            .build();
+
+        sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof());
+    }
+
+    private void doReset(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Flyweight extension)
+    {
+        final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension.buffer(), extension.offset(), extension.sizeof())
+            .build();
+
+        sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof());
+    }
+
+    final class KafkaCacheInitProducerIdNet
+    {
+        private final long originId;
+        private final long routedId;
+        private final long authorization;
+        private final KafkaCacheInitProducerIdApp delegate;
+
+        private long initialId;
+        private long replyId;
+        private MessageConsumer receiver;
+
+        private int state;
+
+        private long initialSeq;
+        private long initialAck;
+        private int initialMax;
+        private long initialBud;
+
+        private long replySeq;
+        private long replyAck;
+        private int replyMax;
+        private int replyPad;
+
+        private KafkaCacheInitProducerIdNet(
+            KafkaCacheInitProducerIdApp delegate,
+            long originId,
+            long routedId,
+            long authorization)
+        {
+            this.delegate = delegate;
+            this.originId = originId;
+            this.routedId = routedId;
+            this.receiver = MessageConsumer.NOOP;
+            this.authorization =
authorization; + } + + private void doInitProducerIdInitialBegin( + long traceId, + OctetsFW extension) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onInitProducerIdMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, extension); + state = KafkaState.openingInitial(state); + } + } + + private void doInitProducerIdInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + + private void doInitProducerIdInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doInitProducerIdInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onInitProducerIdInitialReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doInitProducerIdInitialReset(traceId, extension); + } + + + private void onInitProducerIdInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + delegate.doInitProducerIdInitialWindow(authorization, traceId, budgetId, padding); + } + + private void onInitProducerIdMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onInitProducerIdReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onInitProducerIdReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onInitProducerIdReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + 
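// Dispatch in this callback is duplex: BEGIN/DATA/END/ABORT arrive with the
// reply stream, while RESET/WINDOW flow back against the initial stream;
// one switch keyed on the frame type id serves both directions.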
onInitProducerIdReplyAbort(abort);
+                break;
+            case ResetFW.TYPE_ID:
+                final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+                onInitProducerIdInitialReset(reset);
+                break;
+            case WindowFW.TYPE_ID:
+                final WindowFW window = windowRO.wrap(buffer, index, index + length);
+                onInitProducerIdInitialWindow(window);
+                break;
+            default:
+                break;
+            }
+        }
+
+        private void onInitProducerIdReplyBegin(
+            BeginFW begin)
+        {
+            final long traceId = begin.traceId();
+
+            state = KafkaState.openingReply(state);
+
+            delegate.doInitProducerIdReplyBegin(traceId, begin.extension());
+        }
+
+        private void onInitProducerIdReplyData(
+            DataFW data)
+        {
+            final long sequence = data.sequence();
+            final long acknowledge = data.acknowledge();
+            final long traceId = data.traceId();
+            final int flags = data.flags();
+            final int reserved = data.reserved();
+            final OctetsFW payload = data.payload();
+            final OctetsFW extension = data.extension();
+
+            assert acknowledge <= sequence;
+            assert sequence >= replySeq;
+
+            replySeq = sequence + reserved;
+
+            assert replyAck <= replySeq;
+            assert replySeq <= replyAck + replyMax;
+
+            delegate.doInitProducerIdReplyData(traceId, flags, reserved, payload, extension);
+        }
+
+        private void onInitProducerIdReplyEnd(
+            EndFW end)
+        {
+            final long sequence = end.sequence();
+            final long acknowledge = end.acknowledge();
+            final long traceId = end.traceId();
+
+            assert acknowledge <= sequence;
+            assert sequence >= replySeq;
+
+            replySeq = sequence;
+            state = KafkaState.closedReply(state);
+
+            assert replyAck <= replySeq;
+
+            delegate.doInitProducerIdReplyEnd(traceId);
+        }
+
+        private void onInitProducerIdReplyAbort(
+            AbortFW abort)
+        {
+            final long sequence = abort.sequence();
+            final long acknowledge = abort.acknowledge();
+            final long traceId = abort.traceId();
+
+            assert acknowledge <= sequence;
+            assert sequence >= replySeq;
+
+            replySeq = sequence;
+            state = KafkaState.closedReply(state);
+
+            assert replyAck <= replySeq;
+
+            delegate.doInitProducerIdReplyAbort(traceId);
+        }
+
+        private void doInitProducerIdReplyReset(
+            long traceId,
+            Flyweight extension)
+        {
+            if (!KafkaState.replyClosed(state))
+            {
+                doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax,
+                    traceId, authorization, extension);
+
+                state = KafkaState.closedReply(state);
+            }
+        }
+
+        private void doInitProducerIdReplyWindow(
+            long traceId,
+            long authorization,
+            long budgetId,
+            int padding)
+        {
+            replyAck = Math.max(delegate.replyAck - replyPad, 0);
+            replyMax = delegate.replyMax;
+
+            doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax,
+                traceId, authorization, budgetId, padding + replyPad);
+        }
+    }
+
+    private final class KafkaCacheInitProducerIdApp
+    {
+        private final KafkaCacheInitProducerIdNet net;
+        private final MessageConsumer sender;
+        private final long originId;
+        private final long routedId;
+        private final long initialId;
+        private final long replyId;
+        private final long affinity;
+        private final long authorization;
+
+        private int state;
+
+        private long replyBudgetId;
+
+        private long initialSeq;
+        private long initialAck;
+        private int initialMax;
+
+        private long replySeq;
+        private long replyAck;
+        private int replyMax;
+        private int replyPad;
+        private long replyBud;
+        private int replyCap;
+
+        KafkaCacheInitProducerIdApp(
+            MessageConsumer sender,
+            long originId,
+            long routedId,
+            long initialId,
+            long affinity,
+            long authorization,
+            long resolvedId)
+        {
+            this.net = new KafkaCacheInitProducerIdNet(this, routedId, resolvedId,
authorization); + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + } + + private void onInitProducerIdMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onInitProducerIdInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onInitProducerIdInitialData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onInitProducerIdInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onInitProducerIdInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onInitProducerIdReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onInitProducerIdReplyReset(reset); + break; + default: + break; + } + } + + private void onInitProducerIdInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + net.doInitProducerIdInitialBegin(traceId, extension); + } + + private void onInitProducerIdInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + net.doInitProducerIdInitialData(traceId, authorization, budgetId, reserved, flags, payload, extension); + } + + private void onInitProducerIdInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + net.doInitProducerIdInitialEnd(traceId); + } + + private void onInitProducerIdInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + net.doInitProducerIdInitialAbort(traceId); + } + + private void doInitProducerIdInitialReset( + long traceId, + Flyweight extension) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, 
initialSeq, initialAck, initialMax, + traceId, authorization, extension); + } + } + + private void doInitProducerIdInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialAck = net.initialAck; + initialMax = net.initialMax; + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doInitProducerIdReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doInitProducerIdReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Flyweight extension) + { + + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doInitProducerIdReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doInitProducerIdReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onInitProducerIdReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onInitProducerIdReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + net.doInitProducerIdReplyWindow(traceId, acknowledge, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doInitProducerIdInitialReset(traceId, EMPTY_OCTETS); + doInitProducerIdReplyAbort(traceId); + + net.doInitProducerIdInitialAbort(traceId); + net.doInitProducerIdReplyReset(traceId, EMPTY_OCTETS); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetCommitFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetCommitFactory.java new file mode 100644 index 0000000000..772a508049 --- /dev/null +++ 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetCommitFactory.java @@ -0,0 +1,938 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetCommitBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + +public final class KafkaCacheOffsetCommitFactory implements BindingHandler +{ + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final 
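// This offset-commit factory mirrors the init-producer-id factory's
// scaffolding above (same flyweight pools and do*/on* frame helpers); only
// the BEGIN extension decode and the route resolution differ. A shared base
// class could fold the duplicated helpers, at the cost of the engine's
// prevailing one-factory-per-stream-kind layout.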
ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + + public KafkaCacheOffsetCommitFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_COMMIT; + KafkaOffsetCommitBeginExFW offsetCommitBeginEx = kafkaBeginEx.offsetCommit(); + final String groupId = offsetCommitBeginEx.groupId().asString(); + final String memberId = offsetCommitBeginEx.memberId().asString(); + final String instanceId = offsetCommitBeginEx.instanceId().asString(); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
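// Unlike init-producer-id, which resolves its route with null topic and null
// group, offset-commit routes on the groupId decoded from the BEGIN
// extension above, so group-conditioned routes in the binding config can
// steer commit traffic; memberId and instanceId travel with the stream for
// the commit exchange itself.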
binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + newStream = new KafkaCacheOffsetCommitApp( + sender, + originId, + routedId, + initialId, + affinity, + authorization, + resolvedId, + groupId, + memberId, + instanceId)::onOffsetCommitMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + OctetsFW extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, 
writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheOffsetCommitNet + { + private final long originId; + private final long routedId; + private final long authorization; + private final KafkaCacheOffsetCommitApp delegate; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaCacheOffsetCommitNet( + KafkaCacheOffsetCommitApp delegate, + long originId, + long routedId, + long authorization) + { + this.delegate = delegate; + this.originId = originId; + this.routedId = routedId; + this.receiver = MessageConsumer.NOOP; + this.authorization = authorization; + } + + private void doOffsetCommitInitialBegin( + long traceId, + OctetsFW extension) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + if (KafkaConfiguration.DEBUG) + { + System.out.format("%s Offset Commit connect\n", delegate.groupId); + } + + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onOffsetCommitMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, extension); + state = KafkaState.openingInitial(state); + } + } + + private void doOffsetCommitInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + + private void 
doOffsetCommitInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doOffsetCommitInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onOffsetCommitInitialReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doOffsetCommitInitialReset(traceId, extension); + } + + + private void onOffsetCommitInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + delegate.doOffsetCommitInitialWindow(authorization, traceId, budgetId, padding); + } + + private void onOffsetCommitMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetCommitReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetCommitReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetCommitReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onOffsetCommitReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetCommitInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetCommitInitialWindow(window); + break; + default: + break; + } + } + + private void onOffsetCommitReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + delegate.doOffsetCommitReplyBegin(traceId, begin.extension()); + } + + private void onOffsetCommitReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert 
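The asserts in the window and data handlers above all protect one invariant: a sender may only advance its sequence while it stays within acknowledge + maximum. A self-contained sketch of that credit accounting, with names mirroring the fields in the diff:

```java
// Sequence/acknowledge/maximum flow control in miniature.
public final class FlowControlSketch
{
    long initialSeq; // bytes produced so far
    long initialAck; // bytes the receiver has acknowledged
    int initialMax;  // receiver's advertised window

    boolean trySend(int reserved)
    {
        if (initialSeq + reserved > initialAck + initialMax)
        {
            return false; // no credit; wait for the next WINDOW frame
        }
        initialSeq += reserved;
        assert initialSeq <= initialAck + initialMax;
        return true;
    }

    void onWindow(long acknowledge, int maximum)
    {
        assert acknowledge >= initialAck;
        assert maximum >= initialMax;
        initialAck = acknowledge;
        initialMax = maximum;
        assert initialAck <= initialSeq;
    }

    public static void main(String[] args)
    {
        FlowControlSketch s = new FlowControlSketch();
        s.onWindow(0, 1024);
        System.out.println(s.trySend(512));  // true
        System.out.println(s.trySend(1024)); // false until more credit arrives
    }
}
```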
replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doOffsetCommitReplyData(traceId, flags, reserved, payload, extension); + } + + private void onOffsetCommitReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetCommitReplyEnd(traceId); + } + + private void onOffsetCommitReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetCommitReplyAbort(traceId); + } + + private void doOffsetCommitReplyReset( + long traceId, + Flyweight extension) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, extension); + + state = KafkaState.closedReply(state); + } + } + + private void doOffsetCommitReplyWindow( + long traceId, + long authorization, + long budgetId, + int padding) + { + replyAck = Math.max(delegate.replyAck - replyPad, 0); + replyMax = delegate.replyMax; + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + } + + private final class KafkaCacheOffsetCommitApp + { + private final KafkaCacheOffsetCommitNet net; + private final MessageConsumer sender; + private final String groupId; + private final String memberId; + private final String instanceId; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + private long replyBudgetId; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaCacheOffsetCommitApp( + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + long resolvedId, + String groupId, + String memberId, + String instanceId) + { + this.net = new KafkaCacheOffsetCommitNet(this, routedId, resolvedId, authorization); + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + this.groupId = groupId; + this.memberId = memberId; + this.instanceId = instanceId; + } + + private void onOffsetCommitMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetCommitInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetCommitInitialData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetCommitInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = 
abortRO.wrap(buffer, index, index + length); + onOffsetCommitInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetCommitReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetCommitReplyReset(reset); + break; + default: + break; + } + } + + private void onOffsetCommitInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + net.doOffsetCommitInitialBegin(traceId, extension); + } + + private void onOffsetCommitInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + net.doOffsetCommitInitialData(traceId, authorization, budgetId, reserved, flags, payload, extension); + } + + private void onOffsetCommitInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + net.doOffsetCommitInitialEnd(traceId); + } + + private void onOffsetCommitInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + net.doOffsetCommitInitialAbort(traceId); + } + + private void doOffsetCommitInitialReset( + long traceId, + Flyweight extension) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, extension); + } + } + + private void doOffsetCommitInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialAck = net.initialAck; + initialMax = net.initialMax; + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doOffsetCommitReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doOffsetCommitReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Flyweight extension) + { + + doData(sender, originId, routedId, replyId, replySeq, replyAck, 
replyMax, + traceId, authorization, replyBudgetId, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doOffsetCommitReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doOffsetCommitReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onOffsetCommitReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onOffsetCommitReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + net.doOffsetCommitReplyWindow(traceId, acknowledge, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doOffsetCommitInitialReset(traceId, EMPTY_OCTETS); + doOffsetCommitReplyAbort(traceId); + + net.doOffsetCommitInitialAbort(traceId); + net.doOffsetCommitReplyReset(traceId, EMPTY_OCTETS); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java index 3fdb17dccc..deee0bf4dc 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java @@ -1543,7 +1543,6 @@ private void doOffsetCommitInitialBegin( traceId, this.authorization, affinity, ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) .typeId(kafkaTypeId) .offsetCommit(oc -> oc - .topic(delegate.topic) .groupId(delegate.fanout.groupId) .memberId(delegate.fanout.memberId) .instanceId(delegate.fanout.instanceId)) @@ -1749,6 +1748,7 @@ private void onOffsetCommitRequest( doOffsetCommitInitialBegin(traceId, 0); commitRequests.add(new KafkaPartitionOffset( + delegate.topic, partition.partitionId(), partition.partitionOffset(), delegate.fanout.generationId, @@ -1795,6 +1795,7 @@ private void doOffsetCommit( .set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) 
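Note the shape of the consumer-factory change in this hunk: `.topic(...)` is removed from the offset-commit BEGIN extension while KafkaPartitionOffset gains a topic argument, so the topic now travels with each queued commit rather than being fixed per stream. A sketch of that bookkeeping, with an illustrative record standing in for the real KafkaPartitionOffset:

```java
import java.util.ArrayDeque;
import java.util.Queue;

// Per-request commit bookkeeping after the change: the topic rides along
// with each queued offset instead of being set once at stream BEGIN.
public final class CommitQueueSketch
{
    record PartitionOffset(String topic, int partitionId, long partitionOffset, String metadata) { }

    public static void main(String[] args)
    {
        Queue<PartitionOffset> commitRequests = new ArrayDeque<>();
        commitRequests.add(new PartitionOffset("orders", 0, 41L, null));
        commitRequests.add(new PartitionOffset("payments", 2, 7L, null));

        // one offset-commit stream now drains commits for any topic
        for (PartitionOffset commit; (commit = commitRequests.poll()) != null; )
        {
            System.out.printf("commit %s[%d] -> %d%n",
                commit.topic(), commit.partitionId(), commit.partitionOffset());
        }
    }
}
```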
.typeId(kafkaTypeId) .offsetCommit(oc -> oc + .topic(delegate.topic) .progress(p -> p.partitionId(commit.partitionId) .partitionOffset(commit.partitionOffset) .metadata(commit.metadata)) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java index cf1b2379e3..bcdef3f1c8 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java @@ -73,9 +73,15 @@ public KafkaCacheServerFactory( final KafkaCacheServerConsumerFactory consumerGroupFactory = new KafkaCacheServerConsumerFactory(config, context, bindings::get); + final KafkaCacheOffsetCommitFactory cacheOffsetCommitFactory = + new KafkaCacheOffsetCommitFactory(config, context, bindings::get); + final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = new KafkaCacheOffsetFetchFactory(config, context, bindings::get); + final KafkaCacheInitProducerIdFactory cacheInitProducerIdFactory = + new KafkaCacheInitProducerIdFactory(config, context, bindings::get); + final KafkaCacheServerFetchFactory cacheFetchFactory = new KafkaCacheServerFetchFactory( config, context, bindings::get, supplyCache, supplyCacheRoute); @@ -87,7 +93,9 @@ public KafkaCacheServerFactory( factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); factories.put(KafkaBeginExFW.KIND_CONSUMER, consumerGroupFactory); + factories.put(KafkaBeginExFW.KIND_OFFSET_COMMIT, cacheOffsetCommitFactory); factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, cacheOffsetFetchFactory); + factories.put(KafkaBeginExFW.KIND_INIT_PRODUCER_ID, cacheInitProducerIdFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java index a8bb2e601b..4d72d26725 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java @@ -39,6 +39,7 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.collections.Int2IntHashMap; +import org.agrona.collections.MutableInteger; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; @@ -51,7 +52,6 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaTopicType; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.ArrayFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; @@ -88,6 +88,7 @@ import 
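The KafkaCacheServerFactory hunk above registers the two new handlers (offset commit and init-producer-id) in the same kind-keyed dispatch table the other stream types already use. A minimal sketch of that dispatch, using Agrona's primitive-keyed map; the kind values here are invented, since the real ones come from KafkaBeginExFW.

```java
import org.agrona.collections.Int2ObjectHashMap;

// Dispatch a new stream to the factory registered for its BEGIN-extension kind.
public final class DispatchSketch
{
    interface BindingHandler { String newStream(int kind); }

    public static void main(String[] args)
    {
        final int KIND_OFFSET_COMMIT = 8;     // assumed
        final int KIND_INIT_PRODUCER_ID = 10; // assumed

        Int2ObjectHashMap<BindingHandler> factories = new Int2ObjectHashMap<>();
        factories.put(KIND_OFFSET_COMMIT, k -> "offsetCommit");
        factories.put(KIND_INIT_PRODUCER_ID, k -> "initProducerId");

        BindingHandler factory = factories.get(KIND_OFFSET_COMMIT);
        System.out.println(factory != null ? factory.newStream(KIND_OFFSET_COMMIT) : "reject");
    }
}
```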
io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; import io.aklivity.zilla.runtime.engine.buffer.BufferPool; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; public final class KafkaCacheServerFetchFactory implements BindingHandler { @@ -157,6 +158,8 @@ public final class KafkaCacheServerFetchFactory implements BindingHandler private final Function supplyCache; private final LongFunction supplyCacheRoute; private final int reconnectDelay; + private final EngineContext context; + private final boolean verbose; public KafkaCacheServerFetchFactory( KafkaConfiguration config, @@ -165,6 +168,7 @@ public KafkaCacheServerFetchFactory( Function supplyCache, LongFunction supplyCacheRoute) { + this.context = context; this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); this.writeBuffer = context.writeBuffer(); this.extBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); @@ -180,6 +184,7 @@ public KafkaCacheServerFetchFactory( this.supplyCache = supplyCache; this.supplyCacheRoute = supplyCacheRoute; this.reconnectDelay = config.cacheServerReconnect(); + this.verbose = config.verbose(); } @Override @@ -232,10 +237,11 @@ public MessageConsumer newStream( final KafkaCache cache = supplyCache.apply(cacheName); final KafkaCacheTopic cacheTopic = cache.supplyTopic(topicName); final KafkaCachePartition partition = cacheTopic.supplyFetchPartition(partitionId); - final KafkaTopicType type = binding.topics != null ? binding.topics.get(topicName) : null; + final ConverterHandler convertKey = binding.resolveKeyReader(topicName); + final ConverterHandler convertValue = binding.resolveValueReader(topicName); final KafkaCacheServerFetchFanout newFanout = new KafkaCacheServerFetchFanout(routedId, resolvedId, authorization, - affinity, partition, routeDeltaType, defaultOffset, type); + affinity, partition, routeDeltaType, defaultOffset, convertKey, convertValue); cacheRoute.serverFetchFanoutsByTopicPartition.put(partitionKey, newFanout); fanout = newFanout; @@ -472,7 +478,10 @@ final class KafkaCacheServerFetchFanout private final KafkaOffsetType defaultOffset; private final long retentionMillisMax; private final List members; - private final KafkaTopicType type; + private final ConverterHandler convertKey; + private final ConverterHandler convertValue; + private final MutableInteger entryMark; + private final MutableInteger valueMark; private long leaderId; private long initialId; @@ -507,7 +516,8 @@ private KafkaCacheServerFetchFanout( KafkaCachePartition partition, KafkaDeltaType deltaType, KafkaOffsetType defaultOffset, - KafkaTopicType type) + ConverterHandler convertKey, + ConverterHandler convertValue) { this.originId = originId; this.routedId = routedId; @@ -518,7 +528,10 @@ private KafkaCacheServerFetchFanout( this.retentionMillisMax = defaultOffset == LIVE ? 
SECONDS.toMillis(30) : Long.MAX_VALUE; this.members = new ArrayList<>(); this.leaderId = leaderId; - this.type = type; + this.convertKey = convertKey; + this.convertValue = convertValue; + this.entryMark = new MutableInteger(0); + this.valueMark = new MutableInteger(0); } private void onServerFanoutMemberOpening( @@ -762,9 +775,9 @@ private void onServerFanoutReplyFlush( entryFlags |= CACHE_ENTRY_FLAGS_ABORTED; } - partition.writeEntry(partitionOffset, 0L, producerId, + partition.writeEntry(context, routedId, partitionOffset, entryMark, valueMark, 0L, producerId, EMPTY_KEY, EMPTY_HEADERS, EMPTY_OCTETS, null, - entryFlags, KafkaDeltaType.NONE, type); + entryFlags, KafkaDeltaType.NONE, convertKey, convertValue, verbose); if (result == KafkaTransactionResult.ABORT) { @@ -867,13 +880,14 @@ private void onServerFanoutReplyData( final int entryFlags = (flags & FLAGS_SKIP) != 0x00 ? CACHE_ENTRY_FLAGS_ABORTED : 0x00; final long keyHash = partition.computeKeyHash(key); final KafkaCacheEntryFW ancestor = findAndMarkAncestor(key, nextHead, (int) keyHash, partitionOffset); - partition.writeEntryStart(partitionOffset, timestamp, producerId, - key, keyHash, valueLength, ancestor, entryFlags, deltaType); + partition.writeEntryStart(context, routedId, partitionOffset, entryMark, valueMark, timestamp, producerId, + key, keyHash, valueLength, ancestor, entryFlags, deltaType, valueFragment, convertKey, convertValue, verbose); } if (valueFragment != null) { - partition.writeEntryContinue(valueFragment); + partition.writeEntryContinue(context, routedId, flags, partitionOffset, entryMark, valueMark, + valueFragment, convertValue, verbose); } if ((flags & FLAGS_FIN) != 0x00) @@ -892,7 +906,7 @@ private void onServerFanoutReplyData( assert partitionId == partition.id(); assert partitionOffset >= this.partitionOffset; - partition.writeEntryFinish(headers, deltaType, type); + partition.writeEntryFinish(headers, deltaType); this.partitionOffset = partitionOffset; this.stableOffset = stableOffset; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerProduceFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerProduceFactory.java index 28b058166c..f7f54ecc09 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerProduceFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerProduceFactory.java @@ -168,7 +168,7 @@ public KafkaCacheServerProduceFactory( this.supplyBinding = supplyBinding; this.supplyCache = supplyCache; this.supplyCacheRoute = supplyCacheRoute; - this.cursorFactory = new KafkaCacheCursorFactory(writeBuffer); + this.cursorFactory = new KafkaCacheCursorFactory(writeBuffer.capacity()); this.supplyRemoteIndex = context::supplyClientIndex; this.crc32c = new CRC32C(); this.reconnectDelay = config.cacheServerReconnect(); @@ -1171,6 +1171,8 @@ private void doProduceInitialData( { final long partitionOffset = nextEntry.offset$(); final long timestamp = nextEntry.timestamp(); + final long producerId = nextEntry.producerId(); + final short producerEpoch = nextEntry.producerEpoch(); final int sequence = nextEntry.sequence(); final KafkaAckMode ackMode = KafkaAckMode.valueOf(nextEntry.ackMode()); final KafkaKeyFW key = nextEntry.key(); @@ -1234,12 +1236,12 @@ private void doProduceInitialData( switch (flags) { case FLAG_INIT | FLAG_FIN: - 
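As the fetch-fanout hunks above show, the per-topic KafkaTopicType is replaced by two ConverterHandler instances, one resolved for keys and one for values, which the fanout invokes on every cache entry it writes. The sketch below conveys the idea only; the interface is a simplification, not the real shape of io.aklivity.zilla.runtime.engine.model.ConverterHandler.

```java
import java.nio.charset.StandardCharsets;

// A converter validates (and possibly rewrites) bytes as entries are written;
// a pass-through handler covers topics with no model configured.
public final class ConverterSketch
{
    interface ConverterHandler
    {
        // returns converted length, or -1 to flag the entry as invalid
        int convert(byte[] data, int index, int length);

        ConverterHandler NONE = (d, i, l) -> l; // pass-through
    }

    static final ConverterHandler UTF8_VALIDATOR = (data, index, length) ->
    {
        String s = new String(data, index, length, StandardCharsets.UTF_8);
        return s.contains("\uFFFD") ? -1 : length; // reject mangled UTF-8
    };

    public static void main(String[] args)
    {
        byte[] value = "hello".getBytes(StandardCharsets.UTF_8);
        int converted = UTF8_VALIDATOR.convert(value, 0, value.length);
        System.out.println(converted == -1 ? "invalid entry" : "wrote " + converted + " bytes");
    }
}
```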
doServerInitialDataFull(traceId, timestamp, sequence, checksum, ackMode, key, headers, trailers, - fragment, reserved, flags); + doServerInitialDataFull(traceId, timestamp, producerId, producerEpoch, sequence, checksum, + ackMode, key, headers, trailers, fragment, reserved, flags); break; case FLAG_INIT: - doServerInitialDataInit(traceId, deferred, timestamp, sequence, checksum, ackMode, key, - headers, trailers, fragment, reserved, flags); + doServerInitialDataInit(traceId, deferred, timestamp, producerId, producerEpoch, sequence, + checksum, ackMode, key, headers, trailers, fragment, reserved, flags); break; case FLAG_NONE: doServerInitialDataNone(traceId, fragment, reserved, length, flags); @@ -1277,6 +1279,8 @@ private void doProduceInitialData( private void doServerInitialDataFull( long traceId, long timestamp, + long producerId, + short produceEpoch, int sequence, long checksum, KafkaAckMode ackMode, @@ -1291,6 +1295,8 @@ private void doServerInitialDataFull( ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) .produce(f -> f.timestamp(timestamp) + .producerId(producerId) + .producerEpoch(produceEpoch) .sequence(sequence) .crc32c(checksum) .ackMode(a -> a.set(ackMode)) @@ -1308,6 +1314,8 @@ private void doServerInitialDataInit( long traceId, int deferred, long timestamp, + long producerId, + short produceEpoch, int sequence, long checksum, KafkaAckMode ackMode, @@ -1323,6 +1331,8 @@ private void doServerInitialDataInit( .typeId(kafkaTypeId) .produce(f -> f.deferred(deferred) .timestamp(timestamp) + .producerId(producerId) + .producerEpoch(produceEpoch) .sequence(sequence) .crc32c(checksum) .ackMode(a -> a.set(ackMode)) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java index 7e3de1a680..27ff4fca6c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java @@ -20,6 +20,7 @@ import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; import static java.lang.System.currentTimeMillis; +import java.util.List; import java.util.function.Consumer; import java.util.function.IntConsumer; import java.util.function.LongFunction; @@ -37,6 +38,7 @@ import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.budget.MergedBudgetCreditor; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; @@ -236,9 +238,10 @@ private KafkaClientConnection newConnection( long authorization) { final KafkaBindingConfig binding = supplyBinding.apply(originId); + final List servers = binding.servers(); final KafkaSaslConfig sasl = binding.sasl(); - return new KafkaClientConnection(originId, routedId, authorization, sasl); + return new KafkaClientConnection(originId, routedId, authorization, servers, sasl); } private MessageConsumer newNetworkStream( @@ -1217,9 +1220,10 @@ private KafkaClientConnection( long originId, long routedId, long authorization, + List servers, KafkaSaslConfig 
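The producerId/producerEpoch fields threaded through doServerInitialDataFull/Init above exist because Kafka's idempotent producer dedupes records on (producerId, producerEpoch, sequence). The toy broker-side check below illustrates that rule; it is not Zilla code.

```java
import java.util.HashMap;
import java.util.Map;

// Accept a produced record only if its sequence advances past the last one
// seen for the same (producerId, epoch), so retries are dropped as duplicates.
public final class IdempotentProduceSketch
{
    record ProducerKey(long producerId, short producerEpoch) { }

    private final Map<ProducerKey, Integer> lastSequence = new HashMap<>();

    boolean accept(long producerId, short epoch, int sequence)
    {
        ProducerKey key = new ProducerKey(producerId, epoch);
        Integer last = lastSequence.get(key);
        if (last != null && sequence <= last)
        {
            return false; // duplicate or stale: already appended
        }
        lastSequence.put(key, sequence);
        return true;
    }

    public static void main(String[] args)
    {
        IdempotentProduceSketch broker = new IdempotentProduceSketch();
        System.out.println(broker.accept(7L, (short) 0, 0)); // true
        System.out.println(broker.accept(7L, (short) 0, 0)); // false: retry dropped
    }
}
```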
sasl) { - super(sasl, originId, routedId); + super(servers, sasl, originId, routedId); this.originId = originId; this.routedId = routedId; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java index 3676b3fff5..f1eaf5b83b 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java @@ -24,7 +24,6 @@ import static java.util.Objects.requireNonNull; import java.nio.ByteOrder; -import java.security.SecureRandom; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -134,8 +133,6 @@ public final class KafkaClientDescribeFactory extends KafkaClientSaslHandshaker private final KafkaDescribeClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; private final KafkaDescribeClientDecoder decodeReject = this::decodeReject; - private final SecureRandom randomServerIdGenerator = new SecureRandom(); - private final long maxAgeMillis; private final int kafkaTypeId; private final int proxyTypeId; @@ -909,7 +906,6 @@ private final class KafkaDescribeClient extends KafkaSaslClient private MessageConsumer network; private final String topic; private final Map configs; - private final List servers; private int state; private long authorization; @@ -948,10 +944,9 @@ private final class KafkaDescribeClient extends KafkaSaslClient List servers, KafkaSaslConfig sasl) { - super(sasl, originId, routedId); + super(servers, sasl, originId, routedId); this.topic = requireNonNull(topic); this.configs = new LinkedHashMap<>(configs.size()); - this.servers = servers; configs.forEach(c -> this.configs.put(c, null)); this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeDescribeRequest; @@ -1196,19 +1191,16 @@ private void doNetworkBegin( Consumer extension = EMPTY_EXTENSION; - final KafkaServerConfig kafkaServerConfig = - servers != null ? 
servers.get(randomServerIdGenerator.nextInt(servers.size())) : null; - - if (kafkaServerConfig != null) + if (server != null) { extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) .typeId(proxyTypeId) .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) .source("0.0.0.0") - .destination(kafkaServerConfig.host) + .destination(server.host) .sourcePort(0) - .destinationPort(kafkaServerConfig.port))) - .infos(i -> i.item(ii -> ii.authority(kafkaServerConfig.host))) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) .build() .sizeof()); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index b68ca0ffd8..9b5bbd4c3b 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -91,6 +91,9 @@ public KafkaClientFactory( final KafkaClientOffsetCommitFactory clientOffsetCommitFactory = new KafkaClientOffsetCommitFactory( config, context, bindings::get, accountant::supplyDebitor, signaler, streamFactory, resolveSasl); + final KafkaClientInitProducerIdFactory clientInitProducerIdFactory = new KafkaClientInitProducerIdFactory( + config, context, bindings::get, accountant::supplyDebitor, signaler, streamFactory, resolveSasl); + final KafkaMergedFactory clientMergedFactory = new KafkaMergedFactory( config, context, bindings::get, accountant.creditor()); @@ -102,6 +105,7 @@ public KafkaClientFactory( factories.put(KafkaBeginExFW.KIND_PRODUCE, clientProduceFactory); factories.put(KafkaBeginExFW.KIND_OFFSET_COMMIT, clientOffsetCommitFactory); factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, clientOffsetFetchFactory); + factories.put(KafkaBeginExFW.KIND_INIT_PRODUCER_ID, clientInitProducerIdFactory); factories.put(KafkaBeginExFW.KIND_MERGED, clientMergedFactory); this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java index 63998ac9e7..f71de6de0d 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java @@ -32,6 +32,7 @@ import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; @@ -246,6 +247,7 @@ public MessageConsumer newStream( MessageConsumer application) { final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long affinity = begin.affinity(); final long originId = begin.originId(); final long routedId = begin.routedId(); final long initialId = begin.streamId(); @@ -278,6 +280,9 @@ public MessageConsumer newStream( final KafkaIsolation 
isolation = kafkaFetchBeginEx.isolation().get(); final KafkaSaslConfig sasl = binding.sasl(); + final KafkaClientRoute clientRoute = supplyClientRoute.apply(resolvedId); + final KafkaServerConfig server = clientRoute.servers.get(affinity); + newStream = new KafkaFetchStream( application, originId, @@ -290,6 +295,7 @@ public MessageConsumer newStream( leaderId, initialOffset, isolation, + server, sasl)::onApplication; } } @@ -1747,6 +1753,7 @@ private final class KafkaFetchStream long leaderId, long initialOffset, KafkaIsolation isolation, + KafkaServerConfig server, KafkaSaslConfig sasl) { this.application = application; @@ -1757,7 +1764,7 @@ private final class KafkaFetchStream this.leaderId = leaderId; this.clientRoute = supplyClientRoute.apply(resolvedId); this.client = new KafkaFetchClient(routedId, resolvedId, topic, partitionId, - initialOffset, latestOffset, isolation, sasl); + initialOffset, latestOffset, isolation, server, sasl); } private int replyBudget() @@ -2217,9 +2224,10 @@ private final class KafkaFetchClient extends KafkaSaslClient long initialOffset, long latestOffset, KafkaIsolation isolation, + KafkaServerConfig server, KafkaSaslConfig sasl) { - super(sasl, originId, routedId); + super(server, sasl, originId, routedId); this.stream = KafkaFetchStream.this; this.topic = requireNonNull(topic); this.topicPartitions = clientRoute.supplyPartitions(topic); @@ -2459,18 +2467,16 @@ else if (nextOffset == OFFSET_LIVE || nextOffset == OFFSET_HISTORICAL) Consumer extension = EMPTY_EXTENSION; - final KafkaClientRoute clientRoute = supplyClientRoute.apply(routedId); - final KafkaBrokerInfo broker = clientRoute.brokers.get(affinity); - if (broker != null) + if (server != null) { extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) .typeId(proxyTypeId) .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) .source("0.0.0.0") - .destination(broker.host) + .destination(server.host) .sourcePort(0) - .destinationPort(broker.port))) - .infos(i -> i.item(ii -> ii.authority(broker.host))) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) .build() .sizeof()); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 00036a309f..ba89f747c1 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -25,7 +25,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.ByteOrder; -import java.security.SecureRandom; import java.time.Duration; import java.util.ArrayDeque; import java.util.ArrayList; @@ -271,8 +270,6 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorIgnoreAll = this::decodeIgnoreAll; private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorReject = this::decodeCoordinatorReject; - private final SecureRandom randomServerIdGenerator = new SecureRandom(); - private final int kafkaTypeId; private final int proxyTypeId; private final MutableDirectBuffer writeBuffer; @@ -283,7 +280,6 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final Signaler signaler; private 
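The fetch-factory change above replaces the per-connection broker table lookup with `clientRoute.servers.get(affinity)`: BEGIN carries the broker id as affinity, and the route maps it to a concrete host and port that follows the stream into KafkaFetchClient. A sketch of that lookup, with a stand-in record carrying just the fields the proxy extension needs:

```java
import org.agrona.collections.Long2ObjectHashMap;

// Resolve the target broker for a fetch stream from its affinity.
public final class ServerLookupSketch
{
    record KafkaServerConfig(String host, int port) { }

    public static void main(String[] args)
    {
        Long2ObjectHashMap<KafkaServerConfig> servers = new Long2ObjectHashMap<>();
        servers.put(1L, new KafkaServerConfig("broker1.internal", 9092));
        servers.put(2L, new KafkaServerConfig("broker2.internal", 9092));

        long affinity = 2L; // broker id from the BEGIN frame
        KafkaServerConfig server = servers.get(affinity);
        if (server != null)
        {
            System.out.printf("connect %s:%d%n", server.host(), server.port());
        }
    }
}
```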
final BindingHandler streamFactory; private final UnaryOperator resolveSasl; - private final LongFunction supplyClientRoute; private final LongFunction supplyBinding; private final Supplier supplyInstanceId; private final LongFunction supplyDebitor; @@ -293,6 +289,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final Duration rebalanceTimeout; private final String groupMinSessionTimeoutDefault; private final String groupMaxSessionTimeoutDefault; + private final int encodeMaxBytes; public KafkaClientGroupFactory( KafkaConfiguration config, @@ -319,12 +316,12 @@ public KafkaClientGroupFactory( this.signaler = signaler; this.streamFactory = streamFactory; this.resolveSasl = resolveSasl; - this.supplyClientRoute = supplyClientRoute; this.instanceIds = new Long2ObjectHashMap<>(); this.groupStreams = new Object2ObjectHashMap<>(); this.configs = new LinkedHashMap<>(); this.groupMinSessionTimeoutDefault = String.valueOf(config.clientGroupMinSessionTimeoutDefault()); this.groupMaxSessionTimeoutDefault = String.valueOf(config.clientGroupMaxSessionTimeoutDefault()); + this.encodeMaxBytes = encodePool.slotCapacity() - GROUP_RECORD_FRAME_MAX_SIZE; } @Override @@ -364,6 +361,7 @@ public MessageConsumer newStream( if (resolved != null) { final long resolvedId = resolved.id; + final List servers = binding.servers(); final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl()); final GroupMembership groupMembership = instanceIds.get(binding.id); @@ -384,7 +382,7 @@ public MessageConsumer newStream( protocol, timeout, groupMembership, - binding.servers(), + servers, sasl); newStream = newGroup::onStream; @@ -1224,20 +1222,14 @@ private int decodeLeaveGroupResponse( private final class KafkaGroupStream { - private final ClusterClient clusterClient; - private final DescribeClient describeClient; - private final CoordinatorClient coordinatorClient; + private final ClusterClient cluster; private final GroupMembership groupMembership; - private final List servers; private final String groupId; private final String protocol; - private final long resolvedId; - private final int encodeMaxBytes; + private KafkaGroupClient client; private MessageConsumer sender; - private String host; private String nodeId; - private int port; private int timeout; private MutableDirectBuffer metadataBuffer; @@ -1284,14 +1276,10 @@ private final class KafkaGroupStream this.groupId = groupId; this.protocol = protocol; this.timeout = timeout; - this.resolvedId = resolvedId; this.groupMembership = groupMembership; - this.servers = servers; - this.clusterClient = new ClusterClient(routedId, resolvedId, sasl, this); - this.describeClient = new DescribeClient(routedId, resolvedId, sasl, this); - this.coordinatorClient = new CoordinatorClient(routedId, resolvedId, sasl, this); + this.cluster = new ClusterClient(routedId, resolvedId, servers, sasl, this); + this.client = cluster; this.metadataBuffer = new UnsafeBuffer(new byte[2048]); - this.encodeMaxBytes = encodePool.slotCapacity() - GROUP_RECORD_FRAME_MAX_SIZE; } private void onStream( @@ -1359,7 +1347,7 @@ private void onStreamBegin( state = KafkaState.openingInitial(state); - clusterClient.doNetworkBeginIfNecessary(traceId, authorization, affinity); + cluster.doNetworkBegin(traceId, authorization, affinity); doStreamWindow(traceId, 0, encodeMaxBytes); } @@ -1384,12 +1372,11 @@ private void onStreamData( if (initialSeq > initialAck + initialMax) { - cleanupStream(traceId, ERROR_EXISTS); - coordinatorClient.cleanupNetwork(traceId, 
authorization); + client.onStreamError(traceId, authorization, ERROR_EXISTS); } else { - coordinatorClient.doSyncGroupRequest(traceId, budgetId, payload); + client.onStreamData(traceId, budgetId, payload); } doStreamWindow(traceId, 0, encodeMaxBytes); @@ -1399,10 +1386,10 @@ private void onStreamEnd( EndFW end) { final long traceId = end.traceId(); - final long authorization = end.authorization(); state = KafkaState.closingInitial(state); - coordinatorClient.doLeaveGroupRequest(traceId); + + client.onStreamEnd(traceId); } private void onStreamFlush( @@ -1446,32 +1433,19 @@ private void onStreamFlush( } }); - if (host != null) - { - coordinatorClient.doJoinGroupRequest(traceId); - } - else - { - clusterClient.doEncodeRequestIfNecessary(traceId, budgetId); - } - } - else - { - coordinatorClient.doHeartbeatRequest(traceId); } + + client.onStreamFlush(traceId, budgetId, extension); } private void onStreamAbort( AbortFW abort) { final long traceId = abort.traceId(); - final long authorization = abort.authorization(); state = KafkaState.closedInitial(state); - clusterClient.doNetworkAbort(traceId); - describeClient.doNetworkAbort(traceId); - coordinatorClient.doNetworkAbort(traceId); + client.doNetworkAbort(traceId); cleanupStream(traceId, ERROR_EXISTS); } @@ -1505,9 +1479,7 @@ private void onStreamReset( state = KafkaState.closedReply(state); - clusterClient.doNetworkReset(traceId); - describeClient.doNetworkReset(traceId); - coordinatorClient.doNetworkReset(traceId); + client.doNetworkReset(traceId); } private boolean isStreamReplyOpen() @@ -1517,17 +1489,19 @@ private boolean isStreamReplyOpen() private void doStreamBeginIfNecessary( long traceId, - long authorization) + long authorization, + KafkaServerConfig server) { if (!KafkaState.replyOpening(state)) { - doStreamBegin(traceId, authorization); + doStreamBegin(traceId, authorization, server); } } private void doStreamBegin( long traceId, - long authorization) + long authorization, + KafkaServerConfig server) { state = KafkaState.openingReply(state); @@ -1538,8 +1512,8 @@ private void doStreamBegin( .groupId(groupId) .protocol(protocol) .instanceId(groupMembership.instanceId) - .host(host) - .port(port) + .host(server.host) + .port(server.port) .timeout(timeout)) .build(); @@ -1625,7 +1599,7 @@ private void doStreamWindow( state = KafkaState.openedInitial(state); doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, clusterClient.authorization, 0L, GROUP_RECORD_FRAME_MAX_SIZE); + traceId, cluster.authorization, 0L, GROUP_RECORD_FRAME_MAX_SIZE); } } @@ -1636,7 +1610,7 @@ private void doStreamReset( state = KafkaState.closedInitial(state); doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, clusterClient.authorization, extension); + traceId, cluster.authorization, extension); } private void doStreamAbortIfNecessary( @@ -1662,7 +1636,8 @@ private void onNotCoordinatorError( long traceId, long authorization) { - clusterClient.doNetworkBeginIfNecessary(traceId, authorization, affinity); + client = cluster; + client.doNetworkBegin(traceId, authorization, affinity); } private void onLeaveGroup( @@ -1719,11 +1694,72 @@ private void onStreamMigrate( state = 0; } - coordinatorClient.doJoinGroupRequest(traceId); + client.onStreamMigrate(traceId); + } + } + + private abstract class KafkaGroupClient extends KafkaSaslClient + { + protected KafkaGroupClient( + KafkaServerConfig server, + KafkaSaslConfig sasl, + long originId, + long routedId) + { + super(server, sasl, 
originId, routedId); + } + + protected abstract void doNetworkBegin( + long traceId, + long authorization, + long affinity); + + protected abstract void doNetworkAbort( + long traceId); + + protected abstract void doNetworkReset( + long traceId); + + protected KafkaGroupClient( + List servers, + KafkaSaslConfig sasl, + long originId, + long routedId) + { + super(servers, sasl, originId, routedId); + } + + protected void onStreamMigrate( + long traceId) + { + } + + protected void onStreamData( + long traceId, + long budgetId, + OctetsFW payload) + { + } + + protected void onStreamFlush( + long traceId, + long budgetId, + OctetsFW extension) + { } + + protected void onStreamEnd( + long traceId) + { + } + + protected abstract void onStreamError( + long traceId, + long authorization, + int error); } - private final class ClusterClient extends KafkaSaslClient + private final class ClusterClient extends KafkaGroupClient { private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; @@ -1765,10 +1801,11 @@ private final class ClusterClient extends KafkaSaslClient ClusterClient( long originId, long routedId, + List servers, KafkaSaslConfig sasl, KafkaGroupStream delegate) { - super(sasl, originId, routedId); + super(servers, sasl, originId, routedId); this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeFindCoordinatorRequest; this.delegate = delegate; @@ -1972,7 +2009,8 @@ private void onNetworkSignal( } } - private void doNetworkBeginIfNecessary( + @Override + protected void doNetworkBegin( long traceId, long authorization, long affinity) @@ -1988,44 +2026,32 @@ private void doNetworkBeginIfNecessary( if (!KafkaState.initialOpening(state)) { - doNetworkBegin(traceId, authorization, affinity); - } - } + assert state == 0; - private void doNetworkBegin( - long traceId, - long authorization, - long affinity) - { - assert state == 0; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); - - state = KafkaState.openingInitial(state); + state = KafkaState.openingInitial(state); - Consumer extension = EMPTY_EXTENSION; + Consumer extension = EMPTY_EXTENSION; - final KafkaServerConfig kafkaServerConfig = - delegate.servers != null ? 
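The new KafkaGroupClient base class above is the heart of this refactor: the group stream keeps a single mutable `client` reference that walks through the connection phases (ClusterClient, then DescribeClient, then CoordinatorClient), and stream events are simply forwarded to whichever phase is active. A compressed sketch of that handoff, with stub bodies and the describe phase omitted for brevity:

```java
// One polymorphic `client` field replaces three always-live clients.
public final class GroupPhaseSketch
{
    abstract static class GroupClient
    {
        final GroupStream delegate;
        GroupClient(GroupStream delegate) { this.delegate = delegate; }

        abstract void doNetworkBegin();
        void onStreamFlush() { } // phases override only what they handle
    }

    static final class ClusterClient extends GroupClient
    {
        ClusterClient(GroupStream d) { super(d); }
        @Override void doNetworkBegin()
        {
            System.out.println("find coordinator");
            delegate.client = new CoordinatorClient(delegate); // handoff on response
            delegate.client.doNetworkBegin();
        }
    }

    static final class CoordinatorClient extends GroupClient
    {
        CoordinatorClient(GroupStream d) { super(d); }
        @Override void doNetworkBegin() { System.out.println("join group"); }
        @Override void onStreamFlush() { System.out.println("heartbeat"); }
    }

    static final class GroupStream
    {
        GroupClient client = new ClusterClient(this);
    }

    public static void main(String[] args)
    {
        GroupStream stream = new GroupStream();
        stream.client.doNetworkBegin(); // find coordinator, then join group
        stream.client.onStreamFlush();  // heartbeat goes to the active phase
    }
}
```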
- delegate.servers.get(randomServerIdGenerator.nextInt(delegate.servers.size())) : null; + if (server != null) + { + extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) + .typeId(proxyTypeId) + .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) + .source("0.0.0.0") + .destination(server.host) + .sourcePort(0) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) + .build() + .sizeof()); + } - if (kafkaServerConfig != null) - { - extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) - .typeId(proxyTypeId) - .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) - .source("0.0.0.0") - .destination(kafkaServerConfig.host) - .sourcePort(0) - .destinationPort(kafkaServerConfig.port))) - .infos(i -> i.item(ii -> ii.authority(kafkaServerConfig.host))) - .build() - .sizeof()); + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, extension); } - - network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, extension); } @Override @@ -2066,7 +2092,8 @@ private void doNetworkEnd( cleanupBudgetIfNecessary(); } - private void doNetworkAbort( + @Override + protected void doNetworkAbort( long traceId) { if (!KafkaState.initialClosed(state)) @@ -2080,7 +2107,8 @@ private void doNetworkAbort( cleanupBudgetIfNecessary(); } - private void doNetworkReset( + @Override + protected void doNetworkReset( long traceId) { if (!KafkaState.replyClosed(state)) @@ -2116,6 +2144,25 @@ private void doNetworkWindow( } } + @Override + protected void onStreamFlush( + long traceId, + long budgetId, + OctetsFW extension) + { + doEncodeRequestIfNecessary(traceId, budgetId); + } + + @Override + protected void onStreamError( + long traceId, + long authorization, + int error) + { + delegate.cleanupStream(traceId, error); + cleanupNetwork(traceId, authorization); + } + private void doEncodeRequestIfNecessary( long traceId, long budgetId) @@ -2392,10 +2439,11 @@ private void onFindCoordinator( nextResponseId++; delegate.nodeId = String.valueOf(nodeId); - delegate.host = host.asString(); - delegate.port = port; - delegate.describeClient.doNetworkBegin(traceId, authorization, 0); + KafkaServerConfig server = new KafkaServerConfig(host.asString(), port); + delegate.client = new DescribeClient(originId, routedId, server, sasl, delegate); + + delegate.client.doNetworkBegin(traceId, authorization, 0); cleanupNetwork(traceId, authorization); } @@ -2448,7 +2496,7 @@ private void cleanupBudgetIfNecessary() } } - private final class DescribeClient extends KafkaSaslClient + private final class DescribeClient extends KafkaGroupClient { private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; @@ -2490,10 +2538,11 @@ private final class DescribeClient extends KafkaSaslClient DescribeClient( long originId, long routedId, + KafkaServerConfig server, KafkaSaslConfig sasl, KafkaGroupStream delegate) { - super(sasl, originId, routedId); + super(server, sasl, originId, routedId); this.configs = new LinkedHashMap<>(); this.delegate = delegate; @@ -2717,7 +2766,8 @@ private void onNetworkSignal( } } - private void doNetworkBegin( + @Override + protected void doNetworkBegin( long traceId, long authorization, long affinity) @@ -2738,19 +2788,16 @@ private void doNetworkBegin( Consumer 
extension = EMPTY_EXTENSION; - final KafkaClientRoute clientRoute = supplyClientRoute.apply(routedId); - final KafkaBrokerInfo broker = clientRoute.brokers.get(Long.parseLong(delegate.nodeId)); - - if (broker != null) + if (server != null) { extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) .typeId(proxyTypeId) .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) .source("0.0.0.0") - .destination(broker.host) + .destination(server.host) .sourcePort(0) - .destinationPort(broker.port))) - .infos(i -> i.item(ii -> ii.authority(broker.host))) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) .build() .sizeof()); } @@ -2795,7 +2842,8 @@ private void doNetworkEnd( traceId, authorization, EMPTY_EXTENSION); } - private void doNetworkAbort( + @Override + protected void doNetworkAbort( long traceId) { if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) @@ -2809,7 +2857,8 @@ private void doNetworkAbort( cleanupBudgetIfNecessary(); } - private void doNetworkReset( + @Override + protected void doNetworkReset( long traceId) { if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) @@ -2845,6 +2894,16 @@ private void doNetworkWindow( } } + @Override + protected void onStreamError( + long traceId, + long authorization, + int error) + { + delegate.cleanupStream(traceId, error); + cleanupNetwork(traceId); + } + private void doEncodeRequestIfNecessary( long traceId, long budgetId) @@ -3141,9 +3200,10 @@ else if (delegate.timeout > timeoutMax) delegate.timeout = timeoutMax; } - delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0); + delegate.client = new CoordinatorClient(originId, routedId, server, sasl, delegate); + delegate.client.doNetworkBegin(traceId, authorization, 0); - cleanupNetwork(traceId); + doNetworkEnd(traceId, authorization); } private void cleanupNetwork( @@ -3157,9 +3217,7 @@ private void onNetworkError( long traceId, short errorCode) { - doNetworkAbort(traceId); - doNetworkReset(traceId); - + cleanupNetwork(traceId); delegate.cleanupStream(traceId, errorCode); } @@ -3195,7 +3253,7 @@ private void cleanupBudgetIfNecessary() } } - private final class CoordinatorClient extends KafkaSaslClient + private final class CoordinatorClient extends KafkaGroupClient { private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; @@ -3243,10 +3301,11 @@ private final class CoordinatorClient extends KafkaSaslClient CoordinatorClient( long originId, long routedId, + KafkaServerConfig server, KafkaSaslConfig sasl, KafkaGroupStream delegate) { - super(sasl, originId, routedId); + super(server, sasl, originId, routedId); this.delegate = delegate; this.decoder = decodeCoordinatorReject; @@ -3478,7 +3537,8 @@ private void onNetworkSignal( } } - private void doNetworkBeginIfNecessary( + @Override + protected void doNetworkBegin( long traceId, long authorization, long affinity) @@ -3498,33 +3558,25 @@ private void doNetworkBeginIfNecessary( if (!KafkaState.initialOpening(state)) { - doNetworkBegin(traceId, authorization, affinity); - } - } - - private void doNetworkBegin( - long traceId, - long authorization, - long affinity) - { - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); - state = 
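Every network-begin path in this diff now builds the same proxy BEGIN extension from the resolved `server`: a wildcard source, the broker endpoint as destination, and the host as an authority info (typically consumed downstream, for example for TLS SNI). A stand-in record makes the shape explicit; it is illustrative, not the generated proxy flyweight.

```java
// What the proxy BEGIN extension communicates to the transport binding.
public final class ProxyBeginSketch
{
    record ProxyAddress(String source, String destination, int sourcePort, int destinationPort, String authority) { }

    static ProxyAddress toProxyBegin(String host, int port)
    {
        // source stays wildcard; destination carries the Kafka server endpoint
        return new ProxyAddress("0.0.0.0", host, 0, port, host);
    }

    public static void main(String[] args)
    {
        System.out.println(toProxyBegin("coordinator.internal", 9092));
    }
}
```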
KafkaState.openingInitial(state);
+                state = KafkaState.openingInitial(state);
 
-            Consumer<OctetsFW.Builder> extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
-                .typeId(proxyTypeId)
-                .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
-                    .source("0.0.0.0")
-                    .destination(delegate.host)
-                    .sourcePort(0)
-                    .destinationPort(delegate.port)))
-                .infos(i -> i.item(ii -> ii.authority(delegate.host)))
-                .build()
-                .sizeof());
+                Consumer<OctetsFW.Builder> extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+                    .typeId(proxyTypeId)
+                    .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+                        .source("0.0.0.0")
+                        .destination(server.host)
+                        .sourcePort(0)
+                        .destinationPort(server.port)))
+                    .infos(i -> i.item(ii -> ii.authority(server.host)))
+                    .build()
+                    .sizeof());
 
-            network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax,
-                traceId, authorization, affinity, extension);
+                network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+                    traceId, authorization, affinity, extension);
+            }
         }
 
         @Override
@@ -3569,7 +3621,8 @@ private void doNetworkEnd(
         }
 
-        private void doNetworkAbort(
+        @Override
+        protected void doNetworkAbort(
             long traceId)
         {
             cancelHeartbeat();
@@ -3586,7 +3639,8 @@ private void doNetworkAbort(
             cleanupBudgetIfNecessary();
         }
 
-        private void doNetworkReset(
+        @Override
+        protected void doNetworkReset(
             long traceId)
         {
             if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state))
@@ -3622,6 +3676,56 @@ private void doNetworkWindow(
             }
         }
 
+
+        @Override
+        protected void onStreamMigrate(
+            long traceId)
+        {
+            doJoinGroupRequest(traceId);
+        }
+
+        @Override
+        protected void onStreamData(
+            long traceId,
+            long budgetId,
+            OctetsFW payload)
+        {
+            doSyncGroupRequest(traceId, budgetId, payload);
+        }
+
+        @Override
+        protected void onStreamFlush(
+            long traceId,
+            long budgetId,
+            OctetsFW extension)
+        {
+            if (extension.sizeof() > 0)
+            {
+                doJoinGroupRequest(traceId);
+            }
+            else
+            {
+                doHeartbeatRequest(traceId);
+            }
+        }
+
+        @Override
+        protected void onStreamEnd(
+            long traceId)
+        {
+            doLeaveGroupRequest(traceId);
+        }
+
+        @Override
+        protected void onStreamError(
+            long traceId,
+            long authorization,
+            int error)
+        {
+            delegate.cleanupStream(traceId, error);
+            cleanupNetwork(traceId, authorization);
+        }
+
         private void doEncodeRequestIfNecessary(
             long traceId,
             long budgetId)
@@ -3694,7 +3798,7 @@ private void doEncodeJoinGroupRequest(
 
             decoder = decodeJoinGroupResponse;
 
-            delegate.doStreamBeginIfNecessary(traceId, authorization);
+            delegate.doStreamBeginIfNecessary(traceId, authorization, server);
         }
 
         private int doGenerateSubscriptionMetadata()
@@ -4036,9 +4140,9 @@ private void doJoinGroupRequest(
                 encoders.add(encodeJoinGroupRequest);
                 signaler.signalNow(originId, routedId, initialId, traceId, SIGNAL_NEXT_REQUEST, 0);
             }
-            else if (delegate.host != null)
+            else
             {
-                delegate.doStreamBeginIfNecessary(traceId, authorization);
+                delegate.doStreamBeginIfNecessary(traceId, authorization, server);
             }
         }
 
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientInitProducerIdFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientInitProducerIdFactory.java
new file mode 100644
index 0000000000..e297109c80
--- /dev/null
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientInitProducerIdFactory.java
@@ -0,0 +1,1494 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID; +import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX; +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; + +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.UnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.LongLongConsumer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.produce.InitProducerIdRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.produce.InitProducerIdResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaInitProducerIdBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import 
io.aklivity.zilla.runtime.engine.budget.BudgetDebitor;
+import io.aklivity.zilla.runtime.engine.buffer.BufferPool;
+import io.aklivity.zilla.runtime.engine.concurrent.Signaler;
+
+public final class KafkaClientInitProducerIdFactory extends KafkaClientSaslHandshaker implements BindingHandler
+{
+    private static final int ERROR_NONE = 0;
+
+    private static final int SIGNAL_NEXT_REQUEST = 1;
+
+    private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer();
+    private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0);
+    private static final Consumer<OctetsFW.Builder> EMPTY_EXTENSION = ex -> {};
+
+    private static final short INIT_PRODUCE_ID_API_KEY = 22;
+    private static final short INIT_PRODUCE_ID_API_VERSION = 1;
+
+    private final BeginFW beginRO = new BeginFW();
+    private final DataFW dataRO = new DataFW();
+    private final EndFW endRO = new EndFW();
+    private final AbortFW abortRO = new AbortFW();
+    private final ResetFW resetRO = new ResetFW();
+    private final WindowFW windowRO = new WindowFW();
+    private final SignalFW signalRO = new SignalFW();
+    private final ExtensionFW extensionRO = new ExtensionFW();
+    private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW();
+
+    private final BeginFW.Builder beginRW = new BeginFW.Builder();
+    private final DataFW.Builder dataRW = new DataFW.Builder();
+    private final EndFW.Builder endRW = new EndFW.Builder();
+    private final AbortFW.Builder abortRW = new AbortFW.Builder();
+    private final ResetFW.Builder resetRW = new ResetFW.Builder();
+    private final WindowFW.Builder windowRW = new WindowFW.Builder();
+    private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder();
+    private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder();
+    private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder();
+
+    private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder();
+    private final InitProducerIdRequestFW.Builder initProducerIdRequestRW = new InitProducerIdRequestFW.Builder();
+
+    private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW();
+    private final InitProducerIdResponseFW initProducerrIdResponseRO = new InitProducerIdResponseFW();
+
+    private final KafkaInitProducerIdClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse;
+    private final KafkaInitProducerIdClientDecoder decodeSaslHandshake = this::decodeSaslHandshake;
+    private final KafkaInitProducerIdClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms;
+    private final KafkaInitProducerIdClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism;
+    private final KafkaInitProducerIdClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse;
+    private final KafkaInitProducerIdClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate;
+    private final KafkaInitProducerIdClientDecoder decodeInitProducerIdResponse = this::decodeInitProducerIdResponse;
+
+    private final KafkaInitProducerIdClientDecoder decodeIgnoreAll = this::decodeIgnoreAll;
+    private final KafkaInitProducerIdClientDecoder decodeReject = this::decodeReject;
+
+    private final int kafkaTypeId;
+    private final int proxyTypeId;
+    private final MutableDirectBuffer writeBuffer;
+    private final MutableDirectBuffer extBuffer;
+    private final BufferPool decodePool;
+    private final BufferPool encodePool;
+    private final Signaler signaler;
+    private final BindingHandler streamFactory;
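+    // applied to the binding's sasl() configuration in newStream to obtain the effective SASL settings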
+    private final UnaryOperator<KafkaSaslConfig> resolveSasl;
+    private final LongFunction<KafkaBindingConfig> supplyBinding;
+    private final LongFunction<BudgetDebitor> supplyDebitor;
+
+    public KafkaClientInitProducerIdFactory(
+        KafkaConfiguration config,
+        EngineContext context,
+        LongFunction<KafkaBindingConfig> supplyBinding,
+        LongFunction<BudgetDebitor> supplyDebitor,
+        Signaler signaler,
+        BindingHandler streamFactory,
+        UnaryOperator<KafkaSaslConfig> resolveSasl)
+    {
+        super(config, context);
+        this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME);
+        this.proxyTypeId = context.supplyTypeId("proxy");
+        this.signaler = signaler;
+        this.streamFactory = streamFactory;
+        this.resolveSasl = resolveSasl;
+        this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+        this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+        this.decodePool = context.bufferPool();
+        this.encodePool = context.bufferPool();
+        this.supplyBinding = supplyBinding;
+        this.supplyDebitor = supplyDebitor;
+    }
+
+    @Override
+    public MessageConsumer newStream(
+        int msgTypeId,
+        DirectBuffer buffer,
+        int index,
+        int length,
+        MessageConsumer application)
+    {
+        final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+        final long originId = begin.originId();
+        final long routedId = begin.routedId();
+        final long initialId = begin.streamId();
+        final long affinity = begin.affinity();
+        final long authorization = begin.authorization();
+        final OctetsFW extension = begin.extension();
+        final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
+        final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ?
+            kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null;
+
+        assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_INIT_PRODUCER_ID;
+        final KafkaInitProducerIdBeginExFW initProducerIdBeginEx = kafkaBeginEx.initProducerId();
+        final long producerId = initProducerIdBeginEx.producerId();
+        final short producerEpoch = initProducerIdBeginEx.producerEpoch();
+
+        MessageConsumer newStream = null;
+
+        final KafkaBindingConfig binding = supplyBinding.apply(routedId);
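+        // an init producer id request is not scoped to a topic or group, so the route is resolved
+        // by authorization alone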
+        final KafkaRouteConfig resolved = binding != null ?
+            binding.resolve(authorization, null, null) : null;
+
+        if (resolved != null)
+        {
+            final long resolvedId = resolved.id;
+            final List<KafkaServerConfig> servers = binding.servers();
+            final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl());
+
+            newStream = new KafkaInitProducerIdStream(
+                application,
+                originId,
+                routedId,
+                initialId,
+                affinity,
+                resolvedId,
+                producerId,
+                producerEpoch,
+                servers,
+                sasl)::onApplication;
+        }
+
+        return newStream;
+    }
+
+    private MessageConsumer newStream(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long affinity,
+        Consumer<OctetsFW.Builder> extension)
+    {
+        final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .affinity(affinity)
+            .extension(extension)
+            .build();
+
+        final MessageConsumer receiver =
+            streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
+
+        receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+
+        return receiver;
+    }
+
+    private void doBegin(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long affinity,
+        Consumer<OctetsFW.Builder> extension)
+    {
+        final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .affinity(affinity)
+            .extension(extension)
+            .build();
+
+        receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+    }
+
+    private void doData(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int reserved,
+        DirectBuffer payload,
+        int offset,
+        int length,
+        Consumer<OctetsFW.Builder> extension)
+    {
+        final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .budgetId(budgetId)
+            .reserved(reserved)
+            .payload(payload, offset, length)
+            .extension(extension)
+            .build();
+
+        receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+    }
+
+    private void doData(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int reserved,
+        DirectBuffer payload,
+        int offset,
+        int length,
+        Flyweight extension)
+    {
+        final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .budgetId(budgetId)
+            .reserved(reserved)
+            .payload(payload, offset, length)
+            .extension(extension.buffer(), extension.offset(), extension.sizeof())
+            .build();
+
+        receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+    }
+
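+    // writers for the remaining stream frame types (END, ABORT, WINDOW, RESET), mirroring doBegin and doData above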
+    private void doEnd(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Consumer<OctetsFW.Builder> extension)
+    {
+        final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension)
+            .build();
+
+        receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof());
+    }
+
+    private void doAbort(
+        MessageConsumer receiver,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Consumer<OctetsFW.Builder> extension)
+    {
+        final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension)
+            .build();
+
+        receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof());
+    }
+
+    private void doWindow(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int padding)
+    {
+        final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .budgetId(budgetId)
+            .padding(padding)
+            .build();
+
+        sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof());
+    }
+
+    private void doReset(
+        MessageConsumer sender,
+        long originId,
+        long routedId,
+        long streamId,
+        long sequence,
+        long acknowledge,
+        int maximum,
+        long traceId,
+        long authorization,
+        Flyweight extension)
+    {
+        final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+            .originId(originId)
+            .routedId(routedId)
+            .streamId(streamId)
+            .sequence(sequence)
+            .acknowledge(acknowledge)
+            .maximum(maximum)
+            .traceId(traceId)
+            .authorization(authorization)
+            .extension(extension.buffer(), extension.offset(), extension.sizeof())
+            .build();
+
+        sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof());
+    }
+
+    @FunctionalInterface
+    private interface KafkaInitProducerIdClientDecoder
+    {
+        int decode(
+            KafkaInitProducerIdClient client,
+            long traceId,
+            long authorization,
+            long budgetId,
+            int reserved,
+            MutableDirectBuffer buffer,
+            int offset,
+            int progress,
+            int limit);
+    }
+
+    private int decodeInitProducerIdResponse(
+        KafkaInitProducerIdClient client,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int reserved,
+        DirectBuffer buffer,
+        int offset,
+        int progress,
+        int limit)
+    {
+        final int length = limit - progress;
+
+        decode:
+        if (length != 0)
+        {
+            final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit);
+            if (responseHeader == null)
+            {
+                break decode;
+            }
+
+            progress = responseHeader.limit();
+
+            final InitProducerIdResponseFW initProducerIdResponse = initProducerrIdResponseRO.tryWrap(buffer, progress, limit);
+            if (initProducerIdResponse == null)
+            {
+                break decode;
+            }
+
+            progress = initProducerIdResponse.limit();
+
+            short errorCode = initProducerIdResponse.errorCode();
+            if (errorCode == ERROR_NONE)
+            {
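+                // success: hand the broker-assigned producer id and epoch back to the application stream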
+                client.onDecodeInitProducerrIdResponse(
+                    traceId, initProducerIdResponse.producerId(), initProducerIdResponse.producerEpoch());
+            }
+            else
+            {
+                client.errorCode = errorCode;
+                client.decoder = decodeReject;
+            }
+        }
+
+        return progress;
+    }
+
+    private int decodeReject(
+        KafkaInitProducerIdClient client,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int reserved,
+        DirectBuffer buffer,
+        int offset,
+        int progress,
+        int limit)
+    {
+        client.cleanupNetwork(traceId);
+        client.decoder = decodeIgnoreAll;
+        return limit;
+    }
+
+    private int decodeIgnoreAll(
+        KafkaInitProducerIdClient client,
+        long traceId,
+        long authorization,
+        long budgetId,
+        int reserved,
+        DirectBuffer buffer,
+        int offset,
+        int progress,
+        int limit)
+    {
+        return limit;
+    }
+
+    private final class KafkaInitProducerIdStream
+    {
+        private final MessageConsumer application;
+        private final long originId;
+        private final long routedId;
+        private final long initialId;
+        private final long replyId;
+        private final long affinity;
+        private final KafkaInitProducerIdClient client;
+
+        private int state;
+
+        private long initialSeq;
+        private long initialAck;
+        private int initialMax;
+
+        private long replySeq;
+        private long replyAck;
+        private int replyMax;
+        private int replyPad;
+
+        private long replyBudgetId;
+
+        KafkaInitProducerIdStream(
+            MessageConsumer application,
+            long originId,
+            long routedId,
+            long initialId,
+            long affinity,
+            long resolvedId,
+            long producerId,
+            short producerEpoch,
+            List<KafkaServerConfig> servers,
+            KafkaSaslConfig sasl)
+        {
+            this.application = application;
+            this.originId = originId;
+            this.routedId = routedId;
+            this.initialId = initialId;
+            this.replyId = supplyReplyId.applyAsLong(initialId);
+            this.affinity = affinity;
+            this.client = new KafkaInitProducerIdClient(this, routedId, resolvedId, producerId, producerEpoch, servers, sasl);
+        }
+
+        private void onApplication(
+            int msgTypeId,
+            DirectBuffer buffer,
+            int index,
+            int length)
+        {
+            switch (msgTypeId)
+            {
+            case BeginFW.TYPE_ID:
+                final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+                onApplicationBegin(begin);
+                break;
+            case DataFW.TYPE_ID:
+                final DataFW data = dataRO.wrap(buffer, index, index + length);
+                onApplicationData(data);
+                break;
+            case EndFW.TYPE_ID:
+                final EndFW end = endRO.wrap(buffer, index, index + length);
+                onApplicationEnd(end);
+                break;
+            case AbortFW.TYPE_ID:
+                final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+                onApplicationAbort(abort);
+                break;
+            case WindowFW.TYPE_ID:
+                final WindowFW window = windowRO.wrap(buffer, index, index + length);
+                onApplicationWindow(window);
+                break;
+            case ResetFW.TYPE_ID:
+                final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+                onApplicationReset(reset);
+                break;
+            default:
+                break;
+            }
+        }
+
+        private void onApplicationBegin(
+            BeginFW begin)
+        {
+            final long traceId = begin.traceId();
+            final long authorization = begin.authorization();
+
+            state = KafkaState.openingInitial(state);
+
+            client.doNetworkBegin(traceId, authorization, affinity);
+
+            doApplicationWindow(traceId, 0L, 0, 0, 0);
+        }
+
+        private void onApplicationData(
+            DataFW data)
+        {
+            final long traceId = data.traceId();
+
+            client.cleanupNetwork(traceId);
+        }
+
+        private void onApplicationEnd(
+            EndFW end)
+        {
+            final long traceId = end.traceId();
+            final long authorization = end.authorization();
+
+            state = KafkaState.closedInitial(state);
+
+            client.doNetworkEnd(traceId, authorization);
+        }
+
+        private void onApplicationAbort(
+            AbortFW abort)
+        {
+            final long traceId = abort.traceId();
+
+            state = KafkaState.closedInitial(state);
+
+            client.doNetworkAbortIfNecessary(traceId);
+        }
+
+        private void onApplicationWindow(
+            WindowFW window)
+        {
+            final long sequence = window.sequence();
+            final long acknowledge = window.acknowledge();
+            final int maximum = window.maximum();
+            final long budgetId = window.budgetId();
+            final int padding = window.padding();
+
+            assert acknowledge <= sequence;
+            assert sequence <= replySeq;
+            assert acknowledge >= replyAck;
+            assert maximum >= replyMax;
+
+            this.replyAck = acknowledge;
+            this.replyMax = maximum;
+            this.replyPad = padding;
+            this.replyBudgetId = budgetId;
+
+            assert replyAck <= replySeq;
+        }
+
+        private void onApplicationReset(
+            ResetFW reset)
+        {
+            final long traceId = reset.traceId();
+
+            state = KafkaState.closedInitial(state);
+
+            client.doNetworkResetIfNecessary(traceId);
+        }
+
+        private boolean isApplicationReplyOpen()
+        {
+            return KafkaState.replyOpening(state);
+        }
+
+        private void doApplicationBegin(
+            long traceId,
+            long authorization,
+            Consumer<OctetsFW.Builder> extension)
+        {
+            state = KafkaState.openingReply(state);
+
+            doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+                traceId, authorization, affinity, extension);
+        }
+
+        private void doApplicationEnd(
+            long traceId)
+        {
+            state = KafkaState.closedReply(state);
+            doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+                traceId, client.authorization, EMPTY_EXTENSION);
+        }
+
+        private void doApplicationAbort(
+            long traceId)
+        {
+            state = KafkaState.closedReply(state);
+            doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+                traceId, client.authorization, EMPTY_EXTENSION);
+        }
+
+        private void doApplicationWindow(
+            long traceId,
+            long budgetId,
+            int minInitialNoAck,
+            int minInitialPad,
+            int minInitialMax)
+        {
+            final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck);
+
+            if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state))
+            {
+                initialAck = newInitialAck;
+                assert initialAck <= initialSeq;
+
+                initialMax = minInitialMax;
+
+                state = KafkaState.openedInitial(state);
+
+                doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+                    traceId, client.authorization, budgetId, minInitialPad);
+            }
+        }
+
+        private void doApplicationReset(
+            long traceId,
+            Flyweight extension)
+        {
+            state = KafkaState.closedInitial(state);
+
+            doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+                traceId, client.authorization, extension);
+        }
+
+        private void doApplicationAbortIfNecessary(
+            long traceId)
+        {
+            if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state))
+            {
+                doApplicationAbort(traceId);
+            }
+        }
+
+        private void doApplicationResetIfNecessary(
+            long traceId,
+            Flyweight extension)
+        {
+            if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state))
+            {
+                doApplicationReset(traceId, extension);
+            }
+        }
+
+        private void cleanupApplication(
+            long traceId,
+            int error)
+        {
+            final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity())
+                .typeId(kafkaTypeId)
+                .error(error)
+                .build();
+
+            cleanupApplication(traceId, kafkaResetEx);
+        }
+
+        private void cleanupApplication(
+            long traceId,
+            Flyweight extension)
+        {
+            doApplicationResetIfNecessary(traceId, extension);
+            doApplicationAbortIfNecessary(traceId);
+        }
+    }
+
+    private final class KafkaInitProducerIdClient extends KafkaSaslClient
+    {
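+        // encodes the SASL handshake first when SASL is configured, otherwise sends the
+        // InitProducerId request (api key 22, version 1) immediately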
+        private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest;
+        private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest;
+        private final LongLongConsumer encodeInitProducerIdRequest = this::doEncodeInitProducerIdRequest;
+
+        private final KafkaInitProducerIdStream delegate;
+        private final long producerId;
+        private final short producerEpoch;
+
+        private short errorCode;
+
+        private MessageConsumer network;
+        private int state;
+        private long authorization;
+
+        private long initialSeq;
+        private long initialAck;
+        private int initialMax;
+        private int initialMin;
+        private int initialPad;
+        private long initialBudgetId = NO_BUDGET_ID;
+        private long initialDebIndex = NO_DEBITOR_INDEX;
+
+        private long replySeq;
+        private long replyAck;
+        private int replyMax;
+
+        private int encodeSlot = NO_SLOT;
+        private int encodeSlotOffset;
+        private long encodeSlotTraceId;
+
+        private int decodeSlot = NO_SLOT;
+        private int decodeSlotOffset;
+        private int decodeSlotReserved;
+
+        private int nextResponseId;
+
+        private BudgetDebitor initialDeb;
+        private KafkaInitProducerIdClientDecoder decoder;
+        private LongLongConsumer encoder;
+
+        KafkaInitProducerIdClient(
+            KafkaInitProducerIdStream delegate,
+            long originId,
+            long routedId,
+            long producerId,
+            short producerEpoch,
+            List<KafkaServerConfig> servers,
+            KafkaSaslConfig sasl)
+        {
+            super(servers, sasl, originId, routedId);
+            this.delegate = delegate;
+            this.producerId = producerId;
+            this.producerEpoch = producerEpoch;
+            this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeInitProducerIdRequest;
+
+            this.decoder = decodeReject;
+        }
+
+        private void onNetwork(
+            int msgTypeId,
+            DirectBuffer buffer,
+            int index,
+            int length)
+        {
+            switch (msgTypeId)
+            {
+            case BeginFW.TYPE_ID:
+                final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+                onNetworkBegin(begin);
+                break;
+            case DataFW.TYPE_ID:
+                final DataFW data = dataRO.wrap(buffer, index, index + length);
+                onNetworkData(data);
+                break;
+            case EndFW.TYPE_ID:
+                final EndFW end = endRO.wrap(buffer, index, index + length);
+                onNetworkEnd(end);
+                break;
+            case AbortFW.TYPE_ID:
+                final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+                onNetworkAbort(abort);
+                break;
+            case ResetFW.TYPE_ID:
+                final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+                onNetworkReset(reset);
+                break;
+            case WindowFW.TYPE_ID:
+                final WindowFW window = windowRO.wrap(buffer, index, index + length);
+                onNetworkWindow(window);
+                break;
+            case SignalFW.TYPE_ID:
+                final SignalFW signal = signalRO.wrap(buffer, index, index + length);
+                onNetworkSignal(signal);
+                break;
+            default:
+                break;
+            }
+        }
+
+        private void onNetworkBegin(
+            BeginFW begin)
+        {
+            final long traceId = begin.traceId();
+
+            authorization = begin.authorization();
+            state = KafkaState.openingReply(state);
+
+            doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity());
+        }
+
+        private void onNetworkData(
+            DataFW data)
+        {
+            final long sequence = data.sequence();
+            final long acknowledge = data.acknowledge();
+            final long traceId = data.traceId();
+            final long budgetId = data.budgetId();
+
+            assert acknowledge <= sequence;
+            assert sequence >= replySeq;
+
+            replySeq = sequence + data.reserved();
+            authorization = data.authorization();
+
+            assert replyAck <= replySeq;
+
+            if (replySeq > replyAck + replyMax)
+            {
+                cleanupNetwork(traceId);
+            }
+            else
+            {
+                if (decodeSlot == NO_SLOT)
+                {
+                    decodeSlot = decodePool.acquire(initialId);
+                }
+
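+                // no decode slot available after acquire: the buffer pool is exhausted, so fail the connection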
+                if (decodeSlot == NO_SLOT)
+                {
+                    cleanupNetwork(traceId);
+                }
+                else
+                {
+                    final OctetsFW payload = data.payload();
+                    int reserved = data.reserved();
+                    int offset = payload.offset();
+                    int limit = payload.limit();
+
+                    final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot);
+                    buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset);
+                    decodeSlotOffset += limit - offset;
+                    decodeSlotReserved += reserved;
+
+                    offset = 0;
+                    limit = decodeSlotOffset;
+                    reserved = decodeSlotReserved;
+
+                    decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit);
+                }
+            }
+        }
+
+        private void onNetworkEnd(
+            EndFW end)
+        {
+            final long traceId = end.traceId();
+
+            state = KafkaState.closedReply(state);
+
+            cleanupDecodeSlotIfNecessary();
+
+            if (!delegate.isApplicationReplyOpen())
+            {
+                cleanupNetwork(traceId);
+            }
+            else if (decodeSlot == NO_SLOT)
+            {
+                delegate.doApplicationEnd(traceId);
+            }
+        }
+
+        private void onNetworkAbort(
+            AbortFW abort)
+        {
+            final long traceId = abort.traceId();
+
+            state = KafkaState.closedReply(state);
+
+            cleanupNetwork(traceId);
+        }
+
+        private void onNetworkReset(
+            ResetFW reset)
+        {
+            final long traceId = reset.traceId();
+
+            state = KafkaState.closedInitial(state);
+
+            cleanupNetwork(traceId);
+        }
+
+        private void onNetworkWindow(
+            WindowFW window)
+        {
+            final long sequence = window.sequence();
+            final long acknowledge = window.acknowledge();
+            final int minimum = window.minimum();
+            final int maximum = window.maximum();
+            final long traceId = window.traceId();
+            final long budgetId = window.budgetId();
+            final int padding = window.padding();
+
+            assert acknowledge <= sequence;
+            assert sequence <= initialSeq;
+            assert acknowledge >= initialAck;
+            assert maximum + acknowledge >= initialMax + initialAck;
+
+            this.initialAck = acknowledge;
+            this.initialMax = maximum;
+            this.initialPad = padding;
+            this.initialMin = minimum;
+            this.initialBudgetId = budgetId;
+
+            assert initialAck <= initialSeq;
+
+            this.authorization = window.authorization();
+
+            state = KafkaState.openedInitial(state);
+
+            if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX)
+            {
+                initialDeb = supplyDebitor.apply(initialBudgetId);
+                initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkDataIfNecessary);
+                assert initialDebIndex != NO_DEBITOR_INDEX;
+            }
+
+            doNetworkDataIfNecessary(budgetId);
+
+            doEncodeRequestIfNecessary(traceId, budgetId);
+        }
+
+        private void doNetworkDataIfNecessary(
+            long traceId)
+        {
+            if (encodeSlot != NO_SLOT)
+            {
+                final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot);
+                final int limit = encodeSlotOffset;
+
+                encodeNetwork(traceId, authorization, initialBudgetId, buffer, 0, limit);
+            }
+        }
+
+        private void onNetworkSignal(
+            SignalFW signal)
+        {
+            final long traceId = signal.traceId();
+            final int signalId = signal.signalId();
+
+            if (signalId == SIGNAL_NEXT_REQUEST)
+            {
+                doEncodeRequestIfNecessary(traceId, initialBudgetId);
+            }
+        }
+
+        private void doNetworkBegin(
+            long traceId,
+            long authorization,
+            long affinity)
+        {
+            state = KafkaState.openingInitial(state);
+
+            Consumer<OctetsFW.Builder> extension = EMPTY_EXTENSION;
+
+            if (server != null)
+            {
+                extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+                    .typeId(proxyTypeId)
+                    .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+                        .source("0.0.0.0")
+                        .destination(server.host)
+                        .sourcePort(0)
+                        .destinationPort(server.port)))
+                    .infos(i -> i.item(ii -> ii.authority(server.host)))
+                    .build()
+                    .sizeof());
+            }
+
+            network = 
newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, extension); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + state = KafkaState.closedInitial(state); + + cleanupEncodeSlotIfNecessary(); + cleanupBudgetIfNecessary(); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void doNetworkAbortIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + cleanupBudgetIfNecessary(); + } + + private void doNetworkResetIfNecessary( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeInitProducerIdRequest( + long traceId, + long budgetId) + { + + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(INIT_PRODUCE_ID_API_KEY) + .apiVersion(INIT_PRODUCE_ID_API_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final InitProducerIdRequestFW initProducerIdRequest = + initProducerIdRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .producerId(producerId) + .producerEpoch(producerEpoch) + .build(); + + encodeProgress = initProducerIdRequest.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + 
.apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeInitProducerIdResponse; + } + + private void encodeNetwork( + long traceId, + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int length = limit - offset; + final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0); + final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin); + + int reserved = reservedMax; + + flush: + if (reserved > 0) + { + + boolean claimed = false; + + if (initialDebIndex != NO_DEBITOR_INDEX) + { + reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reserved, reserved, 0); + claimed = reserved > 0; + } + + if (reserved < initialPad || reserved == initialPad && length > 0) + { + break flush; + } + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int flushed = Math.max(reserved - initialPad, 0); + final int remaining = length - flushed; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, offset + flushed, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaInitProducerIdClientDecoder previous = null; + int progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (KafkaState.replyClosing(state)) + { + delegate.doApplicationEnd(traceId); + } + else if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeSaslAuthenticateResponse; + } + + @Override + 
protected void doDecodeSaslAuthenticate( + long traceId) + { + decoder = decodeSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeSaslAuthenticateRequest; + decoder = decodeSaslAuthenticateResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeInitProducerIdRequest; + decoder = decodeInitProducerIdResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, traceId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onDecodeInitProducerrIdResponse( + long traceId, + long newProducerId, + short newProducerEpoch) + { + delegate.doApplicationBegin(traceId, authorization, ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .initProducerId(p -> p.producerId(newProducerId).producerEpoch(newProducerEpoch)) + .build() + .sizeof())); + } + + private void cleanupNetwork( + long traceId) + { + doNetworkResetIfNecessary(traceId); + doNetworkAbortIfNecessary(traceId); + + delegate.cleanupApplication(traceId, errorCode); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + + private void cleanupBudgetIfNecessary() + { + if (initialDebIndex != NO_DEBITOR_INDEX) + { + initialDeb.release(initialDebIndex, initialId); + initialDebIndex = NO_DEBITOR_INDEX; + } + } + } + +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java index d243e2058a..94d2598daf 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java @@ -23,7 +23,6 @@ import static java.lang.System.currentTimeMillis; import static java.util.Objects.requireNonNull; -import java.security.SecureRandom; import java.util.List; import java.util.Objects; import java.util.function.Consumer; @@ -138,8 +137,6 @@ public final class KafkaClientMetaFactory extends KafkaClientSaslHandshaker impl private final KafkaMetaClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; private final KafkaMetaClientDecoder decodeReject = this::decodeReject; - private final SecureRandom randomServerIdGenerator = new SecureRandom(); - private final long maxAgeMillis; private final int kafkaTypeId; private final int proxyTypeId; @@ -1130,9 +1127,8 @@ private final class KafkaMetaClient extends KafkaSaslClient private MessageConsumer network; private final String topic; private final 
Int2IntHashMap topicPartitions;
-        private final List<KafkaServerConfig> servers;
-
-        private final Long2ObjectHashMap<KafkaBrokerInfo> newBrokers;
+        private final Long2ObjectHashMap<KafkaServerConfig> newServers;
         private final Int2IntHashMap newPartitions;
 
         private int state;
@@ -1179,11 +1175,10 @@ private final class KafkaMetaClient extends KafkaSaslClient
             List<KafkaServerConfig> servers,
             KafkaSaslConfig sasl)
         {
-            super(sasl, originId, routedId);
+            super(servers, sasl, originId, routedId);
             this.topic = requireNonNull(topic);
             this.topicPartitions = clientRoute.supplyPartitions(topic);
-            this.servers = servers;
-            this.newBrokers = new Long2ObjectHashMap<>();
+            this.newServers = new Long2ObjectHashMap<>();
             this.newPartitions = new Int2IntHashMap(-1);
 
             this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeMetaRequest;
@@ -1408,19 +1403,16 @@ private void doNetworkBegin(
 
             Consumer<OctetsFW.Builder> extension = EMPTY_EXTENSION;
 
-            final KafkaServerConfig kafkaServerConfig =
-                servers != null ? servers.get(randomServerIdGenerator.nextInt(servers.size())) : null;
-
-            if (kafkaServerConfig != null)
+            if (server != null)
             {
                 extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
                     .typeId(proxyTypeId)
                     .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
                         .source("0.0.0.0")
-                        .destination(kafkaServerConfig.host)
+                        .destination(server.host)
                         .sourcePort(0)
-                        .destinationPort(kafkaServerConfig.port)))
-                    .infos(i -> i.item(ii -> ii.authority(kafkaServerConfig.host)))
+                        .destinationPort(server.port)))
+                    .infos(i -> i.item(ii -> ii.authority(server.host)))
                     .build()
                     .sizeof());
             }
@@ -1786,7 +1778,7 @@ protected void onDecodeSaslAuthenticateResponse(
 
         private void onDecodeMetadata()
         {
-            newBrokers.clear();
+            newServers.clear();
         }
 
         private void onDecodeBroker(
@@ -1794,14 +1786,14 @@ private void onDecodeBroker(
            int brokerId,
            String host,
            int port)
        {
-            newBrokers.put(brokerId, new KafkaBrokerInfo(brokerId, host, port));
+            newServers.put(brokerId, new KafkaServerConfig(host, port));
        }
 
        private void onDecodeBrokers()
        {
-            // TODO: share brokers across cores
-            clientRoute.brokers.clear();
-            clientRoute.brokers.putAll(newBrokers);
+            // TODO: share servers across cores
+            clientRoute.servers.clear();
+            clientRoute.servers.putAll(newServers);
        }
 
        private void onDecodeTopic(
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetCommitFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetCommitFactory.java
index 43e284dc7a..7fec24693c 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetCommitFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetCommitFactory.java
@@ -15,6 +15,7 @@
  */
 package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
 
+import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM;
 import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID;
 import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX;
 import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
@@ -31,6 +32,7 @@
 import org.agrona.concurrent.UnsafeBuffer;
 
 import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig;
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig;
 import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding;
 import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration;
 import 
io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; @@ -56,6 +58,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetCommitBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetCommitDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; @@ -100,6 +103,7 @@ public final class KafkaClientOffsetCommitFactory extends KafkaClientSaslHandsha private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); + private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder(); private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); private final OffsetCommitRequestFW.Builder offsetCommitRequestRW = new OffsetCommitRequestFW.Builder(); @@ -127,6 +131,7 @@ public final class KafkaClientOffsetCommitFactory extends KafkaClientSaslHandsha private final KafkaOffsetCommitClientDecoder decodeReject = this::decodeReject; private final int kafkaTypeId; + private final int proxyTypeId; private final MutableDirectBuffer writeBuffer; private final MutableDirectBuffer extBuffer; private final BufferPool decodePool; @@ -150,6 +155,7 @@ public KafkaClientOffsetCommitFactory( { super(config, context); this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.proxyTypeId = context.supplyTypeId("proxy"); this.signaler = signaler; this.streamFactory = streamFactory; this.resolveSasl = resolveSasl; @@ -185,21 +191,25 @@ public MessageConsumer newStream( assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_COMMIT; final KafkaOffsetCommitBeginExFW kafkaOffsetCommitBeginEx = kafkaBeginEx.offsetCommit(); final String groupId = kafkaOffsetCommitBeginEx.groupId().asString(); - final String topic = kafkaOffsetCommitBeginEx.topic().asString(); final String memberId = kafkaOffsetCommitBeginEx.memberId().asString(); final String instanceId = kafkaOffsetCommitBeginEx.instanceId().asString(); + final String host = kafkaOffsetCommitBeginEx.host().asString(); + final int port = kafkaOffsetCommitBeginEx.port(); MessageConsumer newStream = null; final KafkaBindingConfig binding = supplyBinding.apply(routedId); final KafkaRouteConfig resolved = binding != null ? 
-            binding.resolve(authorization, topic, groupId) : null;
+            binding.resolve(authorization, null, groupId) : null;
 
         if (resolved != null)
         {
             final long resolvedId = resolved.id;
             final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl());
 
+            // TODO: use affinity (like meta, fetch, produce) instead of host and port
+            final KafkaServerConfig server = new KafkaServerConfig(host, port);
+
             newStream = new KafkaOffsetCommitStream(
                 application,
                 originId,
@@ -208,9 +218,9 @@
                 affinity,
                 resolvedId,
                 groupId,
-                topic,
                 memberId,
                 instanceId,
+                server,
                 sasl)::onApplication;
         }
 
@@ -582,8 +592,6 @@ private int decodeOffsetCommitPartition(
                 client.errorCode = errorCode;
                 client.decoder = decodeReject;
             }
-
-
         }
 
         return progress;
@@ -649,9 +657,9 @@ private final class KafkaOffsetCommitStream
             long affinity,
             long resolvedId,
             String groupId,
-            String topic,
             String memberId,
             String instanceId,
+            KafkaServerConfig server,
             KafkaSaslConfig sasl)
         {
             this.application = application;
@@ -661,8 +669,8 @@
             this.replyId = supplyReplyId.applyAsLong(initialId);
             this.affinity = affinity;
             this.initialMax = encodeMaxBytes;
-            this.client = new KafkaOffsetCommitClient(this, routedId, resolvedId, groupId, topic,
-                memberId, instanceId, sasl);
+            this.client = new KafkaOffsetCommitClient(this, routedId, resolvedId, groupId,
+                memberId, instanceId, server, sasl);
         }
 
         private void onApplication(
@@ -743,11 +751,12 @@ private void onApplicationData(
                 kafkaDataExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null;
 
             final KafkaOffsetCommitDataExFW commitDataExFW = kafkaDataEx.offsetCommit();
+            final String topic = commitDataExFW.topic().asString();
             final KafkaOffsetFW progress = commitDataExFW.progress();
             final int generationId = commitDataExFW.generationId();
             final int leaderEpoch = commitDataExFW.leaderEpoch();
 
-            client.onOffsetCommit(traceId, progress.partitionId(), progress.partitionOffset(),
+            client.onOffsetCommit(traceId, topic, progress.partitionId(), progress.partitionOffset(),
                 generationId, leaderEpoch, progress.metadata().asString());
         }
     }
@@ -933,7 +942,6 @@ private final class KafkaOffsetCommitClient extends KafkaSaslClient
         private final LongLongConsumer encodeOffsetCommitRequest = this::doEncodeOffsetCommitRequestIfNecessary;
 
         private final String groupId;
-        private final String topic;
         private final String memberId;
         private final String instanceId;
         private final KafkaOffsetCommitStream delegate;
@@ -975,15 +983,14 @@ private final class KafkaOffsetCommitClient extends KafkaSaslClient
             long originId,
             long routedId,
             String groupId,
-            String topic,
             String memberId,
             String instanceId,
+            KafkaServerConfig server,
             KafkaSaslConfig sasl)
         {
-            super(sasl, originId, routedId);
+            super(server, sasl, originId, routedId);
             this.delegate = delegate;
             this.groupId = requireNonNull(groupId);
-            this.topic = requireNonNull(topic);
             this.memberId = requireNonNull(memberId);
             this.instanceId = instanceId;
             this.commits = new ArrayDeque<>();
@@ -1207,8 +1214,24 @@ private void doNetworkBegin(
         {
             state = KafkaState.openingInitial(state);
 
+            Consumer<OctetsFW.Builder> extension = EMPTY_EXTENSION;
+
+            if (server != null)
+            {
+                extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+                    .typeId(proxyTypeId)
+                    .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+                        .source("0.0.0.0")
+                        .destination(server.host)
+                        .sourcePort(0)
+                        .destinationPort(server.port)))
+                    .infos(i -> i.item(ii -> ii.authority(server.host)))
+                    .build()
+                    .sizeof());
+            }
+
             network = 
newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, EMPTY_EXTENSION); + traceId, authorization, affinity, extension); } @Override @@ -1299,13 +1322,14 @@ private void doNetworkWindow( private void onOffsetCommit( long traceId, + String topic, int partitionId, long partitionOffset, int generationId, int leaderEpoch, String metadata) { - commits.add(new KafkaPartitionOffset(partitionId, + commits.add(new KafkaPartitionOffset(topic, partitionId, partitionOffset, generationId, leaderEpoch, metadata)); doEncodeRequestIfNecessary(traceId, initialBudgetId); @@ -1373,7 +1397,7 @@ private void doEncodeOffsetCommitRequest( final OffsetCommitTopicRequestFW topicRequest = offsetCommitTopicRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .name(topic) + .name(commit.topic) .partitionCount(1) .build(); encodeProgress = topicRequest.limit(); @@ -1420,7 +1444,6 @@ private void encodeNetwork( flush: if (reserved > 0) { - boolean claimed = false; if (initialDebIndex != NO_DEBITOR_INDEX) @@ -1479,6 +1502,7 @@ private void decodeNetwork( { KafkaOffsetCommitClientDecoder previous = null; int progress = offset; + while (progress <= limit && previous != decoder) { previous = decoder; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java index 9961f19c07..35f415c6f7 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java @@ -33,6 +33,7 @@ import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; @@ -206,6 +207,9 @@ public MessageConsumer newStream( final long resolvedId = resolved.id; final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl()); + // TODO: use affinity (like meta, fetch, produce) instead of host and port + final KafkaServerConfig server = new KafkaServerConfig(host, port); + newStream = new KafkaOffsetFetchStream( application, originId, @@ -214,10 +218,9 @@ public MessageConsumer newStream( affinity, resolvedId, groupId, - host, - port, topic, partitions, + server, sasl)::onApplication; } @@ -777,10 +780,9 @@ private final class KafkaOffsetFetchStream long affinity, long resolvedId, String groupId, - String host, - int port, String topic, IntHashSet partitions, + KafkaServerConfig server, KafkaSaslConfig sasl) { this.application = application; @@ -789,8 +791,8 @@ private final class KafkaOffsetFetchStream this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); this.affinity = affinity; - this.client = new KafkaOffsetFetchClient(this, routedId, resolvedId, groupId, host, port, - topic, partitions, sasl); + this.client = new KafkaOffsetFetchClient(this, routedId, resolvedId, groupId, + topic, partitions, server, sasl); } private void onApplication( @@ -1043,8 +1045,6 @@ private final class 
KafkaOffsetFetchClient extends KafkaSaslClient private final KafkaOffsetFetchStream delegate; private final String groupId; - private final String host; - private final int port; private final String topic; private final IntHashSet partitions; private final ObjectHashSet topicPartitions; @@ -1088,17 +1088,14 @@ private final class KafkaOffsetFetchClient extends KafkaSaslClient long originId, long routedId, String groupId, - String host, - int port, String topic, IntHashSet partitions, + KafkaServerConfig server, KafkaSaslConfig sasl) { - super(sasl, originId, routedId); + super(server, sasl, originId, routedId); this.delegate = delegate; this.groupId = requireNonNull(groupId); - this.host = host; - this.port = port; this.topic = topic; this.partitions = partitions; this.topicPartitions = new ObjectHashSet<>(); @@ -1325,10 +1322,10 @@ private void doNetworkBegin( .typeId(proxyTypeId) .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) .source("0.0.0.0") - .destination(host) + .destination(server.host) .sourcePort(0) - .destinationPort(port))) - .infos(i -> i.item(ii -> ii.authority(host))) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) .build() .sizeof()); @@ -1737,6 +1734,7 @@ public void onDecodePartition( OffsetFetchPartitionResponseFW partition) { topicPartitions.add(new KafkaPartitionOffset( + topic, partition.partitionIndex(), partition.committedOffset(), 0, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java index b2fb009d15..d21855ab20 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java @@ -39,6 +39,7 @@ import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; @@ -93,9 +94,10 @@ public final class KafkaClientProduceFactory extends KafkaClientSaslHandshaker i private static final byte RECORD_BATCH_MAGIC = 2; private static final short RECORD_BATCH_ATTRIBUTES_NONE = 0; private static final short RECORD_BATCH_ATTRIBUTES_NO_TIMESTAMP = 0x08; - private static final int RECORD_BATCH_PRODUCER_ID_NONE = -1; + private static final long RECORD_BATCH_PRODUCER_ID_NONE = -1; private static final short RECORD_BATCH_PRODUCER_EPOCH_NONE = -1; - private static final short RECORD_BATCH_SEQUENCE_NONE = -1; + private static final int RECORD_BATCH_BASE_SEQUENCE_NONE = -1; + private static final int RECORD_SEQUENCE_NONE = -1; private static final byte RECORD_ATTRIBUTES_NONE = 0; private static final String TRANSACTION_ID_NONE = null; @@ -254,6 +256,9 @@ public MessageConsumer newStream( final int partitionId = kafkaProduceBeginEx.partition().partitionId(); final KafkaSaslConfig sasl = binding.sasl(); + final KafkaClientRoute clientRoute = supplyClientRoute.apply(resolvedId); + final KafkaServerConfig server = clientRoute.servers.get(affinity); + newStream = new KafkaProduceStream( 
application, originId, @@ -263,6 +268,7 @@ public MessageConsumer newStream( resolvedId, topicName, partitionId, + server, sasl)::onApplication; } } @@ -531,6 +537,9 @@ private int flushRecordInit( assert kafkaDataEx.kind() == KafkaDataExFW.KIND_PRODUCE; final KafkaProduceDataExFW kafkaProduceDataEx = kafkaDataEx.produce(); final long timestamp = kafkaProduceDataEx.timestamp(); + final long producerId = kafkaProduceDataEx.producerId(); + final short producerEpoch = kafkaProduceDataEx.producerEpoch(); + final int sequence = kafkaProduceDataEx.sequence(); final KafkaAckMode ackMode = kafkaProduceDataEx.ackMode().get(); final KafkaKeyFW key = kafkaProduceDataEx.key(); final Array32FW headers = kafkaProduceDataEx.headers(); @@ -542,11 +551,23 @@ private int flushRecordInit( final int maxEncodeableBytes = client.encodeSlotLimit + client.valueCompleteSize + produceRecordFramingSize; if (client.encodeSlot != NO_SLOT && - maxEncodeableBytes > encodePool.slotCapacity()) + (maxEncodeableBytes > encodePool.slotCapacity() || + client.producerId != producerId || + client.producerEpoch != producerEpoch || + sequence <= client.sequence && sequence != RECORD_BATCH_BASE_SEQUENCE_NONE)) { client.doEncodeRequestIfNecessary(traceId, budgetId); } + if (client.producerId == RECORD_BATCH_PRODUCER_ID_NONE) + { + client.baseSequence = sequence; + } + + client.producerId = producerId; + client.producerEpoch = producerEpoch; + client.sequence = sequence; + client.doEncodeRecordInit(traceId, timestamp, ackMode, key, payload, headers); if (client.encodeSlot != NO_SLOT) { @@ -900,6 +921,7 @@ private final class KafkaProduceStream long resolvedId, String topic, int partitionId, + KafkaServerConfig server, KafkaSaslConfig sasl) { this.application = application; @@ -908,7 +930,7 @@ private final class KafkaProduceStream this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); this.affinity = affinity; - this.client = new KafkaProduceClient(this, resolvedId, topic, partitionId, sasl); + this.client = new KafkaProduceClient(this, resolvedId, topic, partitionId, server, sasl); } private void onApplication( @@ -1184,7 +1206,6 @@ private final class KafkaProduceClient extends KafkaSaslClient private final KafkaProduceStream stream; private final String topic; private final int partitionId; - private final KafkaClientRoute clientRoute; private KafkaAckMode encodeableAckMode; private KafkaAckMode encodedAckMode; @@ -1237,19 +1258,24 @@ private final class KafkaProduceClient extends KafkaSaslClient private LongLongConsumer encoder; private boolean flushable; + private long producerId = RECORD_BATCH_PRODUCER_ID_NONE; + private short producerEpoch = RECORD_BATCH_PRODUCER_EPOCH_NONE; + private int baseSequence = RECORD_BATCH_BASE_SEQUENCE_NONE; + private int sequence = RECORD_SEQUENCE_NONE; + KafkaProduceClient( KafkaProduceStream stream, long resolvedId, String topic, int partitionId, + KafkaServerConfig server, KafkaSaslConfig sasl) { - super(sasl, stream.routedId, resolvedId); + super(server, sasl, stream.routedId, resolvedId); this.stream = stream; this.topic = requireNonNull(topic); this.partitionId = partitionId; this.flusher = flushRecord; - this.clientRoute = supplyClientRoute.apply(resolvedId); this.encodeableRecordBatchTimestamp = TIMESTAMP_NONE; this.encodeableRecordBatchTimestampMax = TIMESTAMP_NONE; this.encodeableAckMode = KafkaAckMode.NONE; @@ -1472,17 +1498,16 @@ private void doNetworkBegin( Consumer extension = EMPTY_EXTENSION; - final KafkaBrokerInfo broker = 
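The flushRecordInit changes above encode the idempotent-producer rule: an open batch is flushed early whenever the producer id or epoch changes, or the incoming sequence stops increasing, and the first record after a flush anchors the batch's base sequence. A runnable sketch of just that rule, with hypothetical names:

    public final class IdempotentBatchSketch
    {
        static final long PRODUCER_ID_NONE = -1L;
        static final short PRODUCER_EPOCH_NONE = -1;
        static final int BASE_SEQUENCE_NONE = -1;

        long producerId = PRODUCER_ID_NONE;
        short producerEpoch = PRODUCER_EPOCH_NONE;
        int baseSequence = BASE_SEQUENCE_NONE;
        int sequence = -1;
        int batched;

        void onRecord(long newProducerId, short newEpoch, int newSequence)
        {
            // flush the open batch when the producer identity changes or
            // the sequence stops increasing, as flushRecordInit now does
            boolean flushFirst = batched > 0 &&
                (producerId != newProducerId ||
                 producerEpoch != newEpoch ||
                 (newSequence <= sequence && newSequence != BASE_SEQUENCE_NONE));

            if (flushFirst)
            {
                flush();
            }

            if (producerId == PRODUCER_ID_NONE)
            {
                // first record after a flush anchors the base sequence
                baseSequence = newSequence;
            }

            producerId = newProducerId;
            producerEpoch = newEpoch;
            sequence = newSequence;
            batched++;
        }

        void flush()
        {
            System.out.printf("ProduceRequest producerId=%d epoch=%d baseSequence=%d records=%d%n",
                producerId, producerEpoch, baseSequence, batched);
            // reset to NONE, matching the post-encode reset in the diff
            producerId = PRODUCER_ID_NONE;
            producerEpoch = PRODUCER_EPOCH_NONE;
            baseSequence = BASE_SEQUENCE_NONE;
            sequence = -1;
            batched = 0;
        }

        public static void main(String[] args)
        {
            IdempotentBatchSketch client = new IdempotentBatchSketch();
            client.onRecord(9000L, (short) 0, 0);
            client.onRecord(9000L, (short) 0, 1); // same identity, next sequence: batches
            client.onRecord(9000L, (short) 0, 0); // sequence replay: forces a flush first
            client.flush();
        }
    }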
clientRoute.brokers.get(affinity); - if (broker != null) + if (server != null) { extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) .typeId(proxyTypeId) .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) .source("0.0.0.0") - .destination(broker.host) + .destination(server.host) .sourcePort(0) - .destinationPort(broker.port))) - .infos(i -> i.item(ii -> ii.authority(broker.host))) + .destinationPort(server.port))) + .infos(i -> i.item(ii -> ii.authority(server.host))) .build() .sizeof()); } @@ -1878,6 +1903,9 @@ private void doEncodeProduceRequest( ? RECORD_BATCH_ATTRIBUTES_NO_TIMESTAMP : RECORD_BATCH_ATTRIBUTES_NONE; + final int baseSequence = client.producerId == RECORD_BATCH_PRODUCER_ID_NONE ? RECORD_BATCH_BASE_SEQUENCE_NONE : + client.baseSequence; + final RecordBatchFW recordBatch = recordBatchRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .baseOffset(0) .length(recordBatchLength) @@ -1888,9 +1916,9 @@ private void doEncodeProduceRequest( .lastOffsetDelta(encodeableRecordCount - 1) .firstTimestamp(encodeableRecordBatchTimestamp) .maxTimestamp(encodeableRecordBatchTimestampMax) - .producerId(RECORD_BATCH_PRODUCER_ID_NONE) - .producerEpoch(RECORD_BATCH_PRODUCER_EPOCH_NONE) - .baseSequence(RECORD_BATCH_SEQUENCE_NONE) + .producerId(client.producerId) + .producerEpoch(client.producerEpoch) + .baseSequence(baseSequence) .recordCount(encodeableRecordCount) .build(); @@ -1922,6 +1950,10 @@ private void doEncodeProduceRequest( encodeableRecordBatchTimestamp = TIMESTAMP_NONE; encodedAckMode = encodeableAckMode; encodeableAckMode = KafkaAckMode.NONE; + client.producerId = RECORD_BATCH_PRODUCER_ID_NONE; + client.producerEpoch = RECORD_BATCH_PRODUCER_EPOCH_NONE; + client.baseSequence = RECORD_BATCH_BASE_SEQUENCE_NONE; + client.sequence = RECORD_SEQUENCE_NONE; assert encodeSlot != NO_SLOT; final MutableDirectBuffer encodeSlotBuffer = encodePool.buffer(encodeSlot); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRoute.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRoute.java index 54268e6a2d..15884b5698 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRoute.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRoute.java @@ -19,10 +19,12 @@ import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.Long2ObjectHashMap; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; + public final class KafkaClientRoute { public final long resolvedId; - public final Long2ObjectHashMap brokers; + public final Long2ObjectHashMap servers; public final Int2ObjectHashMap partitions; public volatile long metaInitialId; @@ -31,7 +33,7 @@ public KafkaClientRoute( long resolvedId) { this.resolvedId = resolvedId; - this.brokers = new Long2ObjectHashMap<>(); + this.servers = new Long2ObjectHashMap<>(); this.partitions = new Int2ObjectHashMap<>(); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java index ab21551f5a..b6a3b266c4 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java +++ 
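With KafkaClientRoute now mapping broker affinity to KafkaServerConfig, streams resolve their endpoint with one servers.get(affinity) lookup instead of threading host and port through constructors. A minimal sketch of that lookup, assuming Agrona on the classpath (the binding already imports its collections):

    import org.agrona.collections.Long2ObjectHashMap;

    public final class RouteSketch
    {
        record Server(String host, int port) {}

        public static void main(String[] args)
        {
            // one server per broker affinity, as KafkaClientRoute.servers now holds
            Long2ObjectHashMap<Server> servers = new Long2ObjectHashMap<>();
            servers.put(1L, new Server("broker1.example.com", 9092));
            servers.put(2L, new Server("broker2.example.com", 9092));

            long affinity = 2L;
            Server server = servers.get(affinity);
            System.out.println("dial " + server.host() + ":" + server.port());
        }
    }

The resolved server is also what feeds the proxy BEGIN extension's destination and authority fields in the hunks above.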
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java @@ -15,10 +15,15 @@ */ package io.aklivity.zilla.runtime.binding.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CLIENT_ID_DEFAULT; + import java.nio.charset.StandardCharsets; import java.security.MessageDigest; +import java.security.SecureRandom; import java.util.Arrays; import java.util.Base64; +import java.util.List; +import java.util.Map; import java.util.function.LongUnaryOperator; import java.util.function.Supplier; import java.util.regex.Matcher; @@ -31,9 +36,12 @@ import org.agrona.LangUtil; import org.agrona.MutableDirectBuffer; import org.agrona.collections.LongLongConsumer; +import org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; +import io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplier; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaScramMechanism; import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; @@ -74,6 +82,8 @@ public abstract class KafkaClientSaslHandshaker private static final byte[] SASL_SCRAM_SALT_PASSWORD = ",p=".getBytes(StandardCharsets.US_ASCII); private static final String SASL_SCRAM_CHANNEL_RANDOM = Base64.getEncoder().encodeToString(SASL_SCRAM_CHANNEL_BINDING); + private static final String16FW KAFKA_CLIENT_ID_DEFAULT_VALUE = new String16FW(KAFKA_CLIENT_ID_DEFAULT); + private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); private final SaslHandshakeRequestFW.Builder saslHandshakeRequestRW = new SaslHandshakeRequestFW.Builder(); private final SaslAuthenticateRequestFW.Builder saslAuthenticateRequestRW = new SaslAuthenticateRequestFW.Builder(); @@ -87,7 +97,10 @@ public abstract class KafkaClientSaslHandshaker private KafkaSaslClientDecoder decodeSaslScramAuthenticateFirst = this::decodeSaslScramAuthenticateFirst; private KafkaSaslClientDecoder decodeSaslScramAuthenticateFinal = this::decodeSaslScramAuthenticateFinal; + private final SecureRandom random = new SecureRandom(); + private final MutableDirectBuffer scramBuffer = new UnsafeBuffer(new byte[1024]); + private MessageDigest messageDigest; private Mac mac; private Supplier nonceSupplier; @@ -95,7 +108,9 @@ public abstract class KafkaClientSaslHandshaker private Matcher serverResponseMatcher; private byte[] result, ui, prev; - protected final String16FW clientId; + private final Map clientIdsByServer; + + protected final KafkaClientIdSupplier clientIdSupplier; protected final LongUnaryOperator supplyInitialId; protected final LongUnaryOperator supplyReplyId; protected final MutableDirectBuffer writeBuffer; @@ -104,11 +119,12 @@ public KafkaClientSaslHandshaker( KafkaConfiguration config, EngineContext context) { - this.clientId = new String16FW(config.clientId()); + this.clientIdSupplier = KafkaClientIdSupplier.instantiate(config); this.supplyInitialId = context::supplyInitialId; this.supplyReplyId = context::supplyReplyId; this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.nonceSupplier = config.nonceSupplier(); + this.clientIdsByServer = new Object2ObjectHashMap<>(); } public abstract class KafkaSaslClient @@ -116,6 +132,9 @@ public 
abstract class KafkaSaslClient protected final KafkaSaslConfig sasl; protected final long originId; protected final long routedId; + protected final KafkaServerConfig server; + protected final String16FW clientId; + protected long initialId; protected long replyId; @@ -132,15 +151,29 @@ public abstract class KafkaSaslClient private LongLongConsumer encodeSaslAuthenticate; private KafkaSaslClientDecoder decodeSaslAuthenticate; + protected KafkaSaslClient( + List servers, + KafkaSaslConfig sasl, + long originId, + long routedId) + { + this(servers != null && !servers.isEmpty() + ? servers.get(random.nextInt(servers.size())) + : null, + sasl, originId, routedId); + } protected KafkaSaslClient( + KafkaServerConfig server, KafkaSaslConfig sasl, long originId, long routedId) { + this.server = server; this.sasl = sasl; this.originId = originId; this.routedId = routedId; + this.clientId = supplyClientId(server); this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); } @@ -780,6 +813,21 @@ private int decodeSaslScramAuthenticateFinal( return progress; } + private String16FW supplyClientId( + KafkaServerConfig server) + { + return server != null + ? clientIdsByServer.computeIfAbsent(server, this::createClientId) + : KAFKA_CLIENT_ID_DEFAULT_VALUE; + } + + private String16FW createClientId( + KafkaServerConfig server) + { + String clientId = clientIdSupplier.get(server.host); + return clientId != null ? new String16FW(clientId) : KAFKA_CLIENT_ID_DEFAULT_VALUE; + } + public byte[] hmac(byte[] key, byte[] bytes) { try diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java index 1b5e2351c8..1178b0b24c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java @@ -87,6 +87,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedConsumerFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedProduceDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedProduceFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMetaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; @@ -126,6 +127,7 @@ public final class KafkaMergedFactory implements BindingHandler private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final KafkaKeyFW EMPTY_KEY = new KafkaKeyFW(); private static final Consumer EMPTY_EXTENSION = ex -> {}; private static final MessageConsumer NO_RECEIVER = (m, b, i, l) -> {}; @@ -1362,7 +1364,6 @@ private void onMergedInitialFlush( { final long traceId = flush.traceId(); final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); final OctetsFW extension = flush.extension(); final int reserved = flush.reserved(); final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); @@ 
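Two small behaviors land in the handshaker above: a SASL client without a pinned server picks one of the configured bootstrap servers at random, and client ids are computed once per server via computeIfAbsent. The random pick, sketched standalone:

    import java.security.SecureRandom;
    import java.util.List;

    public final class BootstrapPickSketch
    {
        static <T> T pickOne(List<T> servers, SecureRandom random)
        {
            // mirrors the constructor above: null when no servers are configured
            return servers == null || servers.isEmpty()
                ? null
                : servers.get(random.nextInt(servers.size()));
        }

        public static void main(String[] args)
        {
            List<String> servers = List.of("kafka-1:9092", "kafka-2:9092", "kafka-3:9092");
            System.out.println(pickOne(servers, new SecureRandom()));
        }
    }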
-1377,6 +1378,9 @@ private void onMergedInitialFlush( switch (kafkaMergedFlushEx.kind()) { + case KafkaMergedFlushExFW.KIND_PRODUCE: + onMergedProduceFlush(kafkaMergedFlushEx, traceId); + break; case KafkaMergedFlushExFW.KIND_FETCH: onMergedFetchFlush(kafkaMergedFlushEx, traceId, sequence, reserved); break; @@ -1386,6 +1390,18 @@ private void onMergedInitialFlush( } } + private void onMergedProduceFlush( + KafkaMergedFlushExFW kafkaMergedFlushEx, + long traceId) + { + final KafkaMergedProduceFlushExFW produce = kafkaMergedFlushEx.produce(); + final KafkaKeyFW hashKey = produce.hashKey(); + + final int partitionId = nextPartitionData(hashKey, EMPTY_KEY); + + doMergedProduceReplyFlush(traceId, partitionId); + } + private void onMergedFetchFlush( KafkaMergedFlushExFW kafkaMergedFlushEx, long traceId, @@ -1592,28 +1608,34 @@ private void doMergedReplyBegin( if (capabilities == FETCH_ONLY) { doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity, beginExToKafka()); + traceId, authorization, affinity, beginExToKafka(beginExToKafkaMergedFetchOnly())); + } + else if (capabilities == PRODUCE_ONLY) + { + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, beginExToKafka(beginExToKafkaMergedProduceOnly())); } else { doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity, EMPTY_EXTENSION); + traceId, authorization, affinity, EMPTY_EXTENSION); } doUnmergedFetchReplyWindowsIfNecessary(traceId); } - private Flyweight.Builder.Visitor beginExToKafka() + private Flyweight.Builder.Visitor beginExToKafka( + Consumer beginExToKafkaMerged) { return (buffer, offset, maxLimit) -> kafkaBeginExRW.wrap(buffer, offset, maxLimit) .typeId(kafkaTypeId) - .merged(beginExToKafkaMerged()) + .merged(beginExToKafkaMerged) .build() .limit() - offset; } - private Consumer beginExToKafkaMerged() + private Consumer beginExToKafkaMergedFetchOnly() { return builder -> { @@ -1640,6 +1662,15 @@ private Consumer beginExToKafkaMerged() }; } + private Consumer beginExToKafkaMergedProduceOnly() + { + return builder -> + { + builder.capabilities(c -> c.set(PRODUCE_ONLY)).topic(topic); + leadersByPartitionId.intForEach((k, v) -> builder.partitionsItem(i -> i.partitionId(k))); + }; + } + private void doMergedReplyData( long traceId, int flags, @@ -1806,6 +1837,19 @@ private void doMergedConsumerReplyFlush( traceId, authorization, 0, kafkaFlushExFW); } + private void doMergedProduceReplyFlush( + long traceId, + int partitionId) + { + final KafkaFlushExFW kafkaFlushExFW = kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(mc -> mc.produce(c -> c.partitionId(partitionId))) + .build(); + + doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, 0, kafkaFlushExFW); + } + private void doMergedFetchReplyFlush( long traceId, int reserved, @@ -1968,6 +2012,7 @@ private void onTopicOffsetFetchDataChanged( { partitions.forEach(p -> offsetsByPartitionId.put(p.partitionId(), new KafkaPartitionOffset( + topic, p.partitionId(), p.partitionOffset() == LIVE.value() ? 
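The new PRODUCE flush path above lets a producer ask the merged stream which partition a hash key maps to; the answer comes back in a reply FLUSH carrying the partition id. Kafka's default partitioner hashes keys with murmur2; the sketch below substitutes Arrays.hashCode to stay short, which is an assumption, not the binding's actual hash:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public final class PartitionForKeySketch
    {
        static int partitionForKey(byte[] hashKey, int partitionCount)
        {
            // mask the sign bit so the modulo stays non-negative
            return (Arrays.hashCode(hashKey) & 0x7fffffff) % partitionCount;
        }

        public static void main(String[] args)
        {
            byte[] hashKey = "client-42".getBytes(StandardCharsets.UTF_8);
            int partitionId = partitionForKey(hashKey, 12);
            // the reply FLUSH would carry this partition id back to the producer
            System.out.println("produce flush reply: partitionId=" + partitionId);
        }
    }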
HISTORICAL.value() : p.partitionOffset(), 0, @@ -3870,6 +3915,8 @@ private void doProduceInitialData( final KafkaMergedProduceDataExFW kafkaMergedProduceDataEx = kafkaDataEx.merged().produce(); final int deferred = kafkaMergedProduceDataEx.deferred(); final long timestamp = kafkaMergedProduceDataEx.timestamp(); + final long producerId = kafkaMergedProduceDataEx.producerId(); + final short producerEpoch = kafkaMergedProduceDataEx.producerEpoch(); final KafkaOffsetFW partition = kafkaMergedProduceDataEx.partition(); final KafkaKeyFW key = kafkaMergedProduceDataEx.key(); final Array32FW headers = kafkaMergedProduceDataEx.headers(); @@ -3888,6 +3935,8 @@ private void doProduceInitialData( .produce(pr -> pr .deferred(deferred) .timestamp(timestamp) + .producerId(producerId) + .producerEpoch(producerEpoch) .sequence(sequence) .ackMode(a -> a.set(ackMode)) .key(k -> k diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaPartitionOffset.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaPartitionOffset.java index 969176b0f5..4de4dbb8de 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaPartitionOffset.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaPartitionOffset.java @@ -17,6 +17,7 @@ public final class KafkaPartitionOffset { + public final String topic; public final int partitionId; public final long partitionOffset; public final int generationId; @@ -25,16 +26,18 @@ public final class KafkaPartitionOffset public final long correlationId; public KafkaPartitionOffset( + String topic, int partitionId, long partitionOffset, int generationId, int leaderEpoch, String metadata) { - this(partitionId, partitionOffset, generationId, leaderEpoch, metadata, -1); + this(topic, partitionId, partitionOffset, generationId, leaderEpoch, metadata, -1); } public KafkaPartitionOffset( + String topic, int partitionId, long partitionOffset, int generationId, @@ -42,6 +45,7 @@ public KafkaPartitionOffset( String metadata, long correlationId) { + this.topic = topic; this.partitionId = partitionId; this.partitionOffset = partitionOffset; this.generationId = generationId; diff --git a/runtime/binding-kafka/src/main/moditect/module-info.java b/runtime/binding-kafka/src/main/moditect/module-info.java index 1487416b9f..b15ffc4089 100644 --- a/runtime/binding-kafka/src/main/moditect/module-info.java +++ b/runtime/binding-kafka/src/main/moditect/module-info.java @@ -15,9 +15,13 @@ */ module io.aklivity.zilla.runtime.binding.kafka { + requires io.aklivity.zilla.runtime.common; requires io.aklivity.zilla.runtime.engine; exports io.aklivity.zilla.runtime.binding.kafka.config; + exports io.aklivity.zilla.runtime.binding.kafka.identity; + + uses io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplierFactorySpi; provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBindingFactorySpi; @@ -27,4 +31,7 @@ provides io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi with io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaConditionConfigAdapter; + + provides io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplierFactorySpi + with io.aklivity.zilla.runtime.binding.kafka.internal.identity.KafkaConfluentClientIdSupplierFactory; } diff --git 
a/runtime/binding-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplierFactorySpi b/runtime/binding-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplierFactorySpi new file mode 100644 index 0000000000..f8ded993c1 --- /dev/null +++ b/runtime/binding-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.binding.kafka.identity.KafkaClientIdSupplierFactorySpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.binding.kafka.internal.identity.KafkaConfluentClientIdSupplierFactory diff --git a/runtime/binding-kafka/src/main/zilla/internal.idl b/runtime/binding-kafka/src/main/zilla/internal.idl index c6ae057f28..e8c791b2d0 100644 --- a/runtime/binding-kafka/src/main/zilla/internal.idl +++ b/runtime/binding-kafka/src/main/zilla/internal.idl @@ -23,10 +23,13 @@ scope internal int64 timestamp; int64 ownerId; int64 acknowledge = 0; + int64 producerId = -1; + int16 producerEpoch = -1; int32 sequence = -1; int64 ancestor; int64 descendant; int32 flags = 0; // 0x01 = DIRTY, 0x02 = COMPLETED, 0x04 = ABORTED, 0x08 = CONTROL + int32 convertedPosition = -1; int32 deltaPosition = -1; int16 ackMode = -1; kafka::KafkaKey key; @@ -38,6 +41,14 @@ scope internal octets[paddingLen] padding; } + struct KafkaCachePaddedValue + { + int32 length; + octets[length] value = null; + uint32 paddingLen; + octets[paddingLen] padding; + } + struct KafkaCacheDelta { int32 length; diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index 715446ad8e..bd106eb2b8 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -338,6 +338,23 @@ scope protocol { int32 throttleTimeMillis; } + + struct InitProducerIdRequest + { + string16 transaction = null; + int32 transactionTimeoutMs = 60000; + int64 producerId; + int16 producerEpoch; + } + + struct InitProducerIdResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + int64 producerId; + int16 producerEpoch; + } } scope group diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierTest.java new file mode 100644 index 0000000000..4dca841e67 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/identity/KafkaClientIdSupplierTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
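The protocol.idl additions above describe the InitProducerId round trip that backs the new producer-id support. Restated as plain records, with field names and defaults taken from the idl structs (the values in main are illustrative only):

    public final class InitProducerIdSketch
    {
        // field names and defaults per the idl structs above
        record Request(String transaction, int transactionTimeoutMs, long producerId, short producerEpoch) {}
        record Response(int correlationId, int throttleTimeMillis, short errorCode, long producerId, short producerEpoch) {}

        public static void main(String[] args)
        {
            // a non-transactional client: null transaction id, NONE identity,
            // asking the broker to mint a fresh producer id and epoch
            Request request = new Request(null, 60000, -1L, (short) -1);
            Response response = new Response(1, 0, (short) 0, 9000L, (short) 1);

            System.out.println("request timeoutMs=" + request.transactionTimeoutMs());
            System.out.println("assigned producerId=" + response.producerId() +
                " epoch=" + response.producerEpoch());
        }
    }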
+ */ +package io.aklivity.zilla.runtime.binding.kafka.identity; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CLIENT_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.util.Properties; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; + +public class KafkaClientIdSupplierTest +{ + @Test + public void shouldNotSupplyClientIdWhenNotConfigured() throws Exception + { + Configuration config = new Configuration(); + KafkaClientIdSupplier supplier = KafkaClientIdSupplier.instantiate(config); + + String clientId = supplier.get("localhost"); + + assertNull(clientId); + } + + @Test + public void shouldSupplyClientIdWhenConfigured() throws Exception + { + Properties properties = new Properties(); + properties.setProperty(KAFKA_CLIENT_ID.name(), "custom client id"); + Configuration config = new Configuration(properties); + KafkaClientIdSupplier supplier = KafkaClientIdSupplier.instantiate(config); + + String clientId = supplier.get("localhost"); + + assertEquals("custom client id", clientId); + } + + @Test + public void shouldSupplyClientIdWhenConfluentServer() throws Exception + { + Configuration config = new Configuration(); + KafkaClientIdSupplier supplier = KafkaClientIdSupplier.instantiate(config); + String server = "broker.confluent.cloud"; + + String clientId = supplier.get(server); + + assertNotNull(clientId); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartitionTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartitionTest.java index adc06fb7df..4eb5dc88c3 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartitionTest.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/cache/KafkaCachePartitionTest.java @@ -27,6 +27,7 @@ import java.nio.file.Path; import org.agrona.MutableDirectBuffer; +import org.agrona.collections.MutableInteger; import org.agrona.concurrent.UnsafeBuffer; import org.junit.Rule; import org.junit.Test; @@ -200,6 +201,9 @@ public void shouldCleanSegment() throws Exception int slotCapacity = ENGINE_BUFFER_SLOT_CAPACITY.get(config); MutableDirectBuffer writeBuffer = new UnsafeBuffer(ByteBuffer.allocate(slotCapacity * 2)); + MutableInteger entryMark = new MutableInteger(0); + MutableInteger valueMark = new MutableInteger(0); + MutableInteger valueLimit = new MutableInteger(0); KafkaKeyFW key = new KafkaKeyFW.Builder().wrap(writeBuffer, 0, writeBuffer.capacity()) .length(4) @@ -223,12 +227,14 @@ public void shouldCleanSegment() throws Exception Node head10 = partition.append(10L); KafkaCacheSegment head10s = head10.segment(); - partition.writeEntry(11L, 0L, -1L, key, headers, value, null, 0x00, KafkaDeltaType.NONE, null); + partition.writeEntry(null, 1L, 11L, entryMark, valueMark, 0L, -1L, + key, headers, value, null, 0x00, KafkaDeltaType.NONE, null, null, false); long keyHash = partition.computeKeyHash(key); KafkaCacheEntryFW ancestor = head10.findAndMarkAncestor(key, keyHash, 11L, ancestorRO); - partition.writeEntry(12L, 0L, -1L, key, headers, value, ancestor, 0x00, KafkaDeltaType.NONE, null); + partition.writeEntry(null, 1L, 12L, entryMark, valueMark, 0L, -1L, + key, headers, value, ancestor, 0x00, KafkaDeltaType.NONE, null, null, false); Node head15 = 
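The test above pins down the resolution order for client ids: an explicitly configured kafka client id always wins, otherwise a host-matching supplier discovered through the new KafkaClientIdSupplierFactorySpi service (the Confluent one ships in-tree) may contribute one, and otherwise there is no client id at all. A sketch of that order; the .confluent.cloud suffix match and the supplied value are assumptions here, not the factory's actual rule:

    import java.util.Optional;

    public final class ClientIdResolutionSketch
    {
        static Optional<String> resolveClientId(String configuredId, String host)
        {
            if (configuredId != null)
            {
                return Optional.of(configuredId); // explicit config always wins
            }
            if (host != null && host.endsWith(".confluent.cloud"))
            {
                // stand-in for the in-tree Confluent supplier; the matching
                // rule and the supplied value are assumptions
                return Optional.of("zilla");
            }
            return Optional.empty(); // no client id at all
        }

        public static void main(String[] args)
        {
            System.out.println(resolveClientId(null, "localhost"));               // empty
            System.out.println(resolveClientId("custom client id", "localhost")); // configured
            System.out.println(resolveClientId(null, "broker.confluent.cloud"));  // supplied
        }
    }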
partition.append(15L); KafkaCacheSegment head15s = head15.segment(); @@ -255,6 +261,9 @@ public void shouldSeekAncestor() throws Exception KafkaCacheTopicConfig config = new KafkaCacheTopicConfig(new KafkaConfiguration()); MutableDirectBuffer writeBuffer = new UnsafeBuffer(ByteBuffer.allocate(1024)); + MutableInteger entryMark = new MutableInteger(0); + MutableInteger valueMark = new MutableInteger(0); + MutableInteger valueLimit = new MutableInteger(0); KafkaKeyFW key = new KafkaKeyFW.Builder().wrap(writeBuffer, 0, writeBuffer.capacity()) .length(4) @@ -274,12 +283,14 @@ public void shouldSeekAncestor() throws Exception KafkaCachePartition partition = new KafkaCachePartition(location, config, "cache", "test", 0, 65536, long[]::new); Node head10 = partition.append(10L); - partition.writeEntry(11L, 0L, -1L, key, headers, value, null, 0x00, KafkaDeltaType.NONE, null); + partition.writeEntry(null, 1L, 11L, entryMark, valueMark, 0L, -1L, + key, headers, value, null, 0x00, KafkaDeltaType.NONE, null, null, false); long keyHash = partition.computeKeyHash(key); KafkaCacheEntryFW ancestor = head10.findAndMarkAncestor(key, keyHash, 11L, ancestorRO); - partition.writeEntry(12L, 0L, -1L, key, headers, value, ancestor, 0x00, KafkaDeltaType.NONE, null); + partition.writeEntry(null, 1L, 12L, entryMark, valueMark, 0L, -1L, + key, headers, value, ancestor, 0x00, KafkaDeltaType.NONE, null, null, false); Node head15 = partition.append(15L); Node tail10 = head15.previous(); diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java index b50e1831d8..d94b68fa5f 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java @@ -34,7 +34,7 @@ import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; public class KafkaOptionsConfigAdapterTest { @@ -88,7 +88,7 @@ public void shouldWriteOptions() { KafkaOptionsConfig options = new KafkaOptionsConfig( singletonList("test"), - singletonList(new KafkaTopicConfig("test", LIVE, JSON_PATCH, null, TestValidatorConfig.builder().build())), + singletonList(new KafkaTopicConfig("test", LIVE, JSON_PATCH, null, TestModelConfig.builder().build())), singletonList(new KafkaServerConfig("localhost", 9092)), new KafkaSaslConfig("plain", "username", "password")); @@ -161,7 +161,10 @@ public void shouldWriteCatalogOptions() { KafkaOptionsConfig options = new KafkaOptionsConfig( singletonList("test"), - singletonList(new KafkaTopicConfig("test", LIVE, JSON_PATCH, null, new TestValidatorConfig())), + singletonList(new KafkaTopicConfig("test", LIVE, JSON_PATCH, null, + TestModelConfig.builder() + .length(0) + .build())), singletonList(new KafkaServerConfig("localhost", 9092)), new KafkaSaslConfig("plain", "username", "password")); diff --git 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java index e3e8d9417c..b2f9e00260 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java @@ -290,30 +290,6 @@ public void shouldReceiveMessageValue() throws Exception k3po.finish(); } - @Test - @Configuration("cache.options.validate.yaml") - @Specification({ - "${app}/message.value.valid/client", - "${app}/message.value.valid/server"}) - @ScriptProperty("serverAddress \"zilla://streams/app1\"") - public void shouldReceiveMessageValueTest() throws Exception - { - partition.append(10L); - k3po.finish(); - } - - @Test - @Configuration("cache.options.validate.yaml") - @Specification({ - "${app}/message.value.invalid/client", - "${app}/message.value.invalid/server"}) - @ScriptProperty("serverAddress \"zilla://streams/app1\"") - public void shouldReceiveMessageValueTestInvalid() throws Exception - { - partition.append(10L); - k3po.finish(); - } - @Test @Configuration("cache.yaml") @Specification({ diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index e6091b5d26..3507a80750 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -237,6 +237,36 @@ public void shouldFetchMergedMessageValues() throws Exception k3po.finish(); } + @Test + @Configuration("cache.options.convert.yaml") + @Specification({ + "${app}/merged.fetch.message.value.convert/client", + "${app}/unmerged.fetch.message.value.convert/server"}) + public void shouldFetchMergedMessageValueConvert() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("cache.options.validate.yaml") + @Specification({ + "${app}/merged.fetch.message.value.valid/client", + "${app}/unmerged.fetch.message.value.valid/server"}) + public void shouldFetchMergedMessageValueValid() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("cache.options.validate.yaml") + @Specification({ + "${app}/merged.fetch.message.value.invalid/client", + "${app}/unmerged.fetch.message.value.invalid/server"}) + public void shouldFetchMergedMessageValueInvalid() throws Exception + { + k3po.finish(); + } + @Test @Configuration("cache.options.merged.yaml") @Specification({ @@ -341,6 +371,26 @@ public void shouldProduceMergedMessageValuesByDefault() throws Exception k3po.finish(); } + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/merged.produce.message.values.producer.id/client", + "${app}/unmerged.produce.message.values.producer.id/server"}) + public void shouldProduceMergedMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/merged.produce.message.value.partition.id/client", + "${app}/unmerged.produce.message.value.partition.id/server"}) + public void shouldProduceMergedMessageValueByGettingPartitionId() throws Exception + { + k3po.finish(); + } + @Test 
@Configuration("cache.options.merged.yaml") @Specification({ diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetCommitIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetCommitIT.java new file mode 100644 index 0000000000..209b49b3a4 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetCommitIT.java @@ -0,0 +1,90 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_BOOTSTRAP; +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_RECONNECT_DELAY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheOffsetCommitIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .countersBufferCapacity(8192) + .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) + .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/update.topic.partition.offset/client", + "${app}/update.topic.partition.offset/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldUpdateTopicPartitionOffset() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/update.topic.partition.offsets/client", + "${app}/update.topic.partition.offsets/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldUpdateTopicPartitionOffsets() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("cache.yaml") + @Specification({ + 
"${app}/update.unknown.topic.partition.offset/client", + "${app}/update.unknown.topic.partition.offset/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldRejectUnknownTopicPartitionOffset() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdIT.java new file mode 100644 index 0000000000..44f780f4e1 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdIT.java @@ -0,0 +1,60 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientInitProducerIdIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .countersBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/produce.new.id/client", + "${net}/produce.new.id/server"}) + public void shouldGenerateNewProducerId() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdSaslIT.java new file mode 100644 index 0000000000..723c93b22d --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientInitProducerIdSaslIT.java @@ -0,0 +1,79 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfigurationTest.KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; +import io.aklivity.zilla.runtime.engine.test.annotation.Configure; + +public class ClientInitProducerIdSaslIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .countersBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.options.sasl.plain.yaml") + @Specification({ + "${app}/produce.new.id/client", + "${net}/produce.new.id.sasl.plain/server"}) + public void shouldGenerateNewProducerIdWithSaslPlain() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.options.sasl.scram.yaml") + @Specification({ + "${app}/produce.new.id/client", + "${net}/produce.new.id.sasl.scram/server"}) + @Configure(name = KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME, + value = "io.aklivity.zilla.runtime.binding.kafka.internal.stream.ClientInitProducerIdSaslIT::supplyNonce") + public void shouldGenerateNewProducerIdWithSaslScram() throws Exception + { + k3po.finish(); + } + + public static String supplyNonce() + { + return "fyko+d2lbbFgONRv9qkxdawL"; + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java index 5a22d21243..4de16afec5 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java @@ -152,6 +152,47 @@ public void shouldSendMessageKeyDistinct() throws Exception k3po.finish(); } + @Test + @Configuration("client.when.topic.yaml") + @Specification({ + "${app}/message.producer.id/client", + "${net}/message.producer.id/server"}) + public void shouldSendMessageValueWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + 
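The @Configure hook above exists because SCRAM's client-first-message embeds a random client nonce, which would make the network script unmatchable; pinning the nonce makes the exchange byte-deterministic for k3po. The RFC 5802 shape of that first message, sketched:

    public final class ScramFirstMessageSketch
    {
        static String clientFirstMessage(String user, String nonce)
        {
            // "n,," is the GS2 header: no channel binding, no authzid
            return "n,,n=" + user + ",r=" + nonce;
        }

        public static void main(String[] args)
        {
            // the pinned nonce from the test makes this output byte-stable
            System.out.println(clientFirstMessage("user", "fyko+d2lbbFgONRv9qkxdawL"));
        }
    }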
@Configuration("client.when.topic.yaml") + @Specification({ + "${app}/message.values.producer.id/client", + "${net}/message.values.producer.id/server"}) + @Configure(name = KafkaConfigurationTest.KAFKA_CLIENT_PRODUCE_MAX_REQUEST_MILLIS_NAME, value = "200") + public void shouldSendMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.when.topic.yaml") + @Specification({ + "${app}/message.values.producer.id.replay/client", + "${net}/message.values.producer.id.replay/server"}) + public void shouldReplyMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.when.topic.yaml") + @Specification({ + "${app}/message.values.producer.id.changes/client", + "${net}/message.values.producer.id.changes/server"}) + public void shouldReplyMessageValuesWithProducerIdThatChanges() throws Exception + { + k3po.finish(); + } + @Test @Configuration("client.when.topic.yaml") @Specification({ diff --git a/runtime/binding-mqtt-kafka/pom.xml b/runtime/binding-mqtt-kafka/pom.xml index 4f7b07ba95..114e875e4e 100644 --- a/runtime/binding-mqtt-kafka/pom.xml +++ b/runtime/binding-mqtt-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml @@ -26,7 +26,7 @@ 11 11 - 0.90 + 0.89 0 @@ -108,7 +108,7 @@ flyweight-maven-plugin ${project.version} - core mqtt kafka internal + core mqtt mqtt_kafka kafka internal io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java index 12a5f5dbe8..afcd7c7d6e 100644 --- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java +++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java @@ -23,6 +23,7 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; + import io.aklivity.zilla.runtime.binding.mqtt.kafka.config.MqttKafkaConditionKind; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionFactory; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Array32FW; diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java index 7648a531bb..0a0e4a3ee4 100644 --- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java +++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java @@ -48,15 +48,16 @@ public MqttKafkaProxyFactory( { final Long2ObjectHashMap bindings = new Long2ObjectHashMap<>(); final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); + final Long2ObjectHashMap clientMetadata = new Long2ObjectHashMap<>(); - final MqttKafkaPublishFactory publishFactory = new MqttKafkaPublishFactory( - config, context, bindings::get); + final MqttKafkaPublishFactory publishFactory = new MqttKafkaPublishFactory(config, context, bindings::get, + clientMetadata::get); final MqttKafkaSubscribeFactory subscribeFactory = new 
MqttKafkaSubscribeFactory( config, context, bindings::get); final MqttKafkaSessionFactory sessionFactory = new MqttKafkaSessionFactory( - config, context, instanceId, bindings::get); + config, context, instanceId, bindings::get, clientMetadata); factories.put(MqttBeginExFW.KIND_PUBLISH, publishFactory); factories.put(MqttBeginExFW.KIND_SUBSCRIBE, subscribeFactory); diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index 727462d7ec..9b9a809083 100644 --- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -19,7 +19,10 @@ import static java.time.Instant.now; import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.LinkedList; import java.util.List; +import java.util.Queue; import java.util.function.Function; import java.util.function.LongFunction; import java.util.function.LongUnaryOperator; @@ -29,12 +32,17 @@ import org.agrona.MutableDirectBuffer; import org.agrona.collections.Int2IntHashMap; import org.agrona.collections.Int2ObjectHashMap; +import org.agrona.collections.Long2LongHashMap; +import org.agrona.collections.Long2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaBindingConfig; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaHeaderHelper; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaGroup; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaOffsetMetadata; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaTopicPartition; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaAckMode; @@ -44,6 +52,8 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormatFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishFlags; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishOffsetMetadataFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttQoS; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.AbortFW; @@ -55,6 +65,8 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaFlushExFW; +import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedProduceFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; @@ -70,7 +82,6 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory { private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); - private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; private static final String KAFKA_TYPE_NAME = "kafka"; private static final String MQTT_TYPE_NAME = "mqtt"; private static final byte SLASH_BYTE = (byte) '/'; @@ -83,6 +94,7 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private static final int KAFKA_ERROR_RECORD_LIST_TOO_LARGE = 18; private static final int KAFKA_ERROR_MESSAGE_TOO_LARGE = 10; private static final Int2IntHashMap MQTT_REASON_CODES; + private static final int OFFSET_METADATA_VERSION = 1; static { @@ -117,17 +129,20 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); + private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); + private final MqttPublishOffsetMetadataFW.Builder mqttOffsetMetadataRW = new MqttPublishOffsetMetadataFW.Builder(); private final Array32FW.Builder kafkaHeadersRW = new Array32FW.Builder<>(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()); private final MutableDirectBuffer writeBuffer; private final MutableDirectBuffer extBuffer; private final MutableDirectBuffer kafkaHeadersBuffer; + private final MutableDirectBuffer offsetBuffer; private final BindingHandler streamFactory; private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; @@ -138,17 +153,20 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private final String16FW binaryFormat; private final String16FW textFormat; private final Int2ObjectHashMap qosLevels; + private final LongFunction supplyClientMetadata; public MqttKafkaPublishFactory( MqttKafkaConfiguration config, EngineContext context, - LongFunction supplyBinding) + LongFunction supplyBinding, + LongFunction supplyClientMetadata) { this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.offsetBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.helper = new MqttKafkaHeaderHelper(); this.streamFactory = context.streamFactory(); this.supplyInitialId = context::supplyInitialId; @@ -160,6 +178,7 @@ 
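The publish-proxy changes here are the MQTT side of the producer-id work: only an MqttQoS.EXACTLY_ONCE publish opens a KafkaOffsetCommitStream companion, and the committed offset metadata (the MqttPublishOffsetMetadataFW flyweight above) remembers the MQTT packet id. A sketch of the dedupe idea that enables, with hypothetical types standing in for the binding's real state:

    import java.util.HashMap;
    import java.util.Map;

    public final class Qos2DedupeSketch
    {
        static final int EXACTLY_ONCE = 2;

        // stand-in for offset metadata keyed by MQTT packet id
        final Map<Integer, Long> producedOffsetByPacketId = new HashMap<>();

        boolean shouldProduce(int qos, int packetId)
        {
            return qos != EXACTLY_ONCE || !producedOffsetByPacketId.containsKey(packetId);
        }

        void onProduced(int qos, int packetId, long offset)
        {
            if (qos == EXACTLY_ONCE)
            {
                // the binding persists this as offset metadata on commit
                producedOffsetByPacketId.put(packetId, offset);
            }
        }

        public static void main(String[] args)
        {
            Qos2DedupeSketch session = new Qos2DedupeSketch();
            System.out.println(session.shouldProduce(2, 77)); // true: first delivery
            session.onProduced(2, 77, 1234L);
            System.out.println(session.shouldProduce(2, 77)); // false: duplicate PUBLISH
        }
    }

Combined with the idempotent produce batching in the kafka binding, this is what makes a redelivered QoS 2 PUBLISH safe to drop rather than produce twice.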
public MqttKafkaPublishFactory( this.qosLevels.put(0, new String16FW("0")); this.qosLevels.put(1, new String16FW("1")); this.qosLevels.put(2, new String16FW("2")); + this.supplyClientMetadata = supplyClientMetadata; } @Override @@ -175,6 +194,7 @@ public MessageConsumer newStream( final long routedId = begin.routedId(); final long initialId = begin.streamId(); final long authorization = begin.authorization(); + final long affinity = begin.affinity(); final OctetsFW extension = begin.extension(); final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); @@ -191,14 +211,16 @@ public MessageConsumer newStream( { final long resolvedId = resolved.id; final String16FW messagesTopic = resolved.messages; - newStream = new MqttPublishProxy(mqtt, originId, routedId, initialId, resolvedId, - messagesTopic, binding.retainedTopic(), binding.clients)::onMqttMessage; + final int qos = mqttPublishBeginEx.qos(); + final MqttPublishProxy proxy = new MqttPublishProxy(mqtt, originId, routedId, initialId, resolvedId, affinity, + binding, messagesTopic, binding.retainedTopic(), qos, binding.clients); + newStream = proxy::onMqttMessage; } return newStream; } - private final class MqttPublishProxy + public final class MqttPublishProxy { private final MessageConsumer mqtt; private final long originId; @@ -214,6 +236,7 @@ private final class MqttPublishProxy private long initialSeq; private long initialAck; private int initialMax; + private long affinity; private long replySeq; private long replyAck; @@ -227,6 +250,13 @@ private final class MqttPublishProxy private OctetsFW clientIdOctets; private boolean retainAvailable; private int publishFlags; + private int packetId; + private int qos; + private KafkaOffsetCommitStream offsetCommit; + private Long2ObjectHashMap offsets; + private Int2ObjectHashMap> partitions; + private Long2LongHashMap leaderEpochs; + private KafkaGroup group; private MqttPublishProxy( MessageConsumer mqtt, @@ -234,8 +264,11 @@ private MqttPublishProxy( long routedId, long initialId, long resolvedId, + long affinity, + MqttKafkaBindingConfig binding, String16FW kafkaMessagesTopic, String16FW kafkaRetainedTopic, + int qos, List> clients) { this.mqtt = mqtt; @@ -243,8 +276,14 @@ private MqttPublishProxy( this.routedId = routedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); - this.messages = new KafkaMessagesProxy(originId, resolvedId, this, kafkaMessagesTopic); - this.retained = new KafkaRetainedProxy(originId, resolvedId, this, kafkaRetainedTopic); + this.affinity = affinity; + this.qos = qos; + if (qos == MqttQoS.EXACTLY_ONCE.value()) + { + this.offsetCommit = new KafkaOffsetCommitStream(originId, resolvedId, this); + } + this.messages = new KafkaMessagesProxy(originId, resolvedId, affinity, this, kafkaMessagesTopic); + this.retained = new KafkaRetainedProxy(originId, resolvedId, affinity, this, kafkaRetainedTopic); this.clients = clients; } @@ -310,7 +349,7 @@ private void onMqttBegin( String topicName = mqttPublishBeginEx.topic().asString(); assert topicName != null; - final int qos = mqttPublishBeginEx.qos(); + this.qos = mqttPublishBeginEx.qos(); final String16FW clientId = mqttPublishBeginEx.clientId(); final MutableDirectBuffer clientIdBuffer = new UnsafeBuffer(new byte[clientId.sizeof() + 2]); @@ -355,28 +394,25 @@ private void onMqttBegin( .value(clientHashKeyBuffer, 0, clientHashKeyBuffer.capacity()) .build(); } - - messages.doKafkaBegin(traceId, authorization, affinity, qos); this.retainAvailable = (mqttPublishBeginEx.flags() & 1 << 
MqttPublishFlags.RETAIN.value()) != 0; - if (retainAvailable) + + if (qos == MqttQoS.EXACTLY_ONCE.value()) { - retained.doKafkaBegin(traceId, authorization, affinity, qos); + final MqttKafkaPublishMetadata clientMetadata = supplyClientMetadata.apply(affinity); + this.offsets = clientMetadata.offsets; + this.partitions = clientMetadata.partitions; + this.leaderEpochs = clientMetadata.leaderEpochs; + this.group = clientMetadata.group; + offsetCommit.doKafkaBegin(traceId, authorization, affinity, retainAvailable); } - } - - private String clientHashKey( - String topicName) - { - String clientHashKey = null; - if (clients != null) + else { - for (Function client : clients) + messages.doKafkaBegin(traceId, authorization, affinity, qos); + if (retainAvailable) { - clientHashKey = client.apply(topicName); - break; + retained.doKafkaBegin(traceId, authorization, affinity, qos); } } - return clientHashKey; } private void onMqttData( @@ -388,9 +424,10 @@ private void onMqttData( final long authorization = data.authorization(); final long budgetId = data.budgetId(); final int reserved = data.reserved(); - final int flags = data.flags(); final OctetsFW payload = data.payload(); final OctetsFW extension = data.extension(); + int flags = data.flags(); + int kafkaFlags = data.flags(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -406,6 +443,8 @@ private void onMqttData( mqttDataEx = extension.get(mqttDataExRO::tryWrap); } + int deferred; + if ((flags & DATA_FLAG_INIT) != 0x00) { assert mqttDataEx.kind() == MqttDataExFW.KIND_PUBLISH; @@ -454,35 +493,106 @@ private void onMqttData( addHeader(helper.kafkaCorrelationHeaderName, mqttPublishDataEx.correlation().bytes()); } - mqttPublishDataEx.properties().forEach(property -> addHeader(property.key(), property.value())); addHeader(helper.kafkaQosHeaderName, qosLevels.get(mqttPublishDataEx.qos())); - final int deferred = mqttPublishDataEx.deferred(); + deferred = mqttPublishDataEx.deferred(); + + long producerId; + short producerEpoch; + long producerSequence; + + if (qos == MqttQoS.EXACTLY_ONCE.value()) + { + kafkaFlags = flags & ~DATA_FLAG_FIN; + final long offsetKey = offsetKey(messages.topicString, messages.qos2PartitionId); + final KafkaOffsetMetadata metadata = offsets.get(offsetKey); + producerId = metadata.producerId; + producerEpoch = metadata.producerEpoch; + producerSequence = metadata.sequence; + } + else + { + producerId = -1; + producerEpoch = -1; + producerSequence = -1; + } + kafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m.produce(mp -> mp .deferred(deferred) .timestamp(now().toEpochMilli()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .producerId(producerId) + .producerEpoch(producerEpoch) + .partition(p -> p.partitionId(-1).partitionOffset(producerSequence)) .key(b -> b.set(key)) .hashKey(this::setHashKey) .headers(kafkaHeadersRW.build()))) .build(); publishFlags = mqttPublishDataEx.flags(); + packetId = mqttPublishDataEx.packetId(); + } + else + { + deferred = 0; + if (qos == MqttQoS.EXACTLY_ONCE.value()) + { + kafkaFlags = flags & ~DATA_FLAG_FIN; + } } - messages.doKafkaData(traceId, authorization, budgetId, reserved, flags, payload, kafkaDataEx); + messages.doKafkaData(traceId, authorization, budgetId, reserved, kafkaFlags, payload, kafkaDataEx); + + if ((flags & DATA_FLAG_FIN) != 0x00 && qos == MqttQoS.EXACTLY_ONCE.value()) + { + doCommitOffsetIncomplete(traceId, authorization, messages.topicString, + messages.qos2PartitionId, packetId, messages); + 
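// QoS 2 sequencing: the produce DATA above was sent with its FIN flag
// stripped, so the Kafka produce stays open; doCommitOffsetIncomplete
// records the packet id in the partition's offset metadata, and
// sendKafkaFinData completes the produce only after that offset commit
// is acknowledged by a WINDOW on the offset-commit stream.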
} if (retainAvailable) { if (hasPublishFlagRetained(publishFlags)) { - retained.doKafkaData(traceId, authorization, budgetId, reserved, flags, payload, kafkaDataEx); + long producerId; + short producerEpoch; + long producerSequence; + + if (qos == MqttQoS.EXACTLY_ONCE.value()) + { + kafkaFlags = flags & ~DATA_FLAG_FIN; + final long offsetKey = offsetKey(messages.topicString, messages.qos2PartitionId); + final KafkaOffsetMetadata metadata = offsets.get(offsetKey); + producerId = metadata.producerId; + producerEpoch = metadata.producerEpoch; + producerSequence = metadata.sequence; + + kafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(deferred) + .timestamp(now().toEpochMilli()) + .producerId(producerId) + .producerEpoch(producerEpoch) + .partition(p -> p.partitionId(-1).partitionOffset(producerSequence)) + .key(b -> b.set(key)) + .hashKey(this::setHashKey) + .headers(kafkaHeadersRW.build()))) + .build(); + } + + retained.doKafkaData(traceId, authorization, budgetId, reserved, kafkaFlags, payload, kafkaDataEx); + + if ((flags & DATA_FLAG_FIN) != 0x00 && qos == MqttQoS.EXACTLY_ONCE.value()) + { + doCommitOffsetIncomplete(traceId, authorization, retained.topicString, + retained.qos2PartitionId, packetId, retained); + } } else { @@ -498,41 +608,12 @@ private void onMqttData( } } - if ((flags & DATA_FLAG_FIN) != 0x00) + if ((flags & DATA_FLAG_FIN) != 0x00 && qos != MqttQoS.EXACTLY_ONCE.value()) { publishFlags = 0; } } - private void setHashKey( - KafkaKeyFW.Builder builder) - { - if (hashKey != null) - { - builder.set(hashKey); - } - } - - private void addFiltersHeader( - String16FW responseTopic) - { - final DirectBuffer responseBuffer = responseTopic.value(); - final int capacity = responseBuffer.capacity(); - - int offset = 0; - int matchAt = 0; - while (offset >= 0 && offset < capacity && matchAt != -1) - { - matchAt = indexOfByte(responseBuffer, offset, capacity, SLASH_BYTE); - if (matchAt != -1) - { - addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, matchAt - offset); - offset = matchAt + 1; - } - } - addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, capacity - offset); - } - private void onMqttEnd( EndFW end) { @@ -554,6 +635,10 @@ private void onMqttEnd( { retained.doKafkaEnd(traceId, initialSeq, authorization); } + if (offsetCommit != null) + { + offsetCommit.doKafkaEnd(traceId, authorization); + } } private void onMqttAbort( @@ -577,6 +662,10 @@ private void onMqttAbort( { retained.doKafkaAbort(traceId, authorization); } + if (offsetCommit != null) + { + offsetCommit.doKafkaAbort(traceId, authorization); + } } private void onMqttReset( @@ -603,6 +692,10 @@ private void onMqttReset( { retained.doKafkaReset(traceId); } + if (offsetCommit != null) + { + offsetCommit.doKafkaReset(traceId); + } } private void onMqttWindow( @@ -650,18 +743,6 @@ private void doMqttBegin( traceId, authorization, affinity); } - private void doMqttFlush( - long traceId, - long authorization, - long budgetId, - int reserved) - { - replySeq = messages.replySeq; - - doFlush(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, reserved, - EMPTY_OCTETS); - } - private void doMqttAbort( long traceId, long authorization) @@ -688,9 +769,21 @@ private void doMqttEnd( } } + private void doMqttReset( + long traceId, + Flyweight extension) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + 
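// Propagate the reset to the MQTT client, carrying the reason code
// mapped from the Kafka RESET extension when one was supplied.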
doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, extension); + } + } + private void doMqttWindow( - long authorization, long traceId, + long authorization, long budgetId, int padding, int capabilities) @@ -699,28 +792,95 @@ private void doMqttWindow( final long newInitialAck = retainedFlag ? Math.min(messages.initialAck, retained.initialAck) : messages.initialAck; final int newInitialMax = retainedFlag ? Math.max(messages.initialMax, retained.initialMax) : messages.initialMax; - if (initialAck != newInitialAck || initialMax != newInitialMax) + if (MqttKafkaState.initialOpened(messages.state) && + (!retainedFlag || MqttKafkaState.initialOpened(retained.state)) && + (initialAck != newInitialAck || initialMax != newInitialMax)) { initialAck = newInitialAck; initialMax = newInitialMax; + int minimum = 0; + if (qos == MqttQoS.EXACTLY_ONCE.value()) + { + minimum = initialMax; + } assert initialAck <= initialSeq; - doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, padding, 0, capabilities); + traceId, authorization, budgetId, padding, minimum, capabilities); } } - private void doMqttReset( + private String clientHashKey( + String topicName) + { + String clientHashKey = null; + if (clients != null) + { + for (Function client : clients) + { + clientHashKey = client.apply(topicName); + break; + } + } + return clientHashKey; + } + + private void doCommitOffsetIncomplete( long traceId, - Flyweight extension) + long authorization, + String topic, + int partitionId, + int packetId, + KafkaProxy kafka) { - if (!MqttKafkaState.initialClosed(state)) + final long offsetKey = offsetKey(topic, partitionId); + final KafkaOffsetMetadata metadata = offsets.get(offsetKey); + metadata.packetIds.add(packetId); + Flyweight offsetCommitEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetCommit(o -> o + .topic(topic) + .progress(p -> p + .partitionId(partitionId) + .partitionOffset(metadata.sequence) + .metadata(offsetMetadataToString(metadata))) + .generationId(group.generationId) + .leaderEpoch((int) leaderEpochs.get(offsetKey))) + .build(); + + offsetCommit.unfinishedKafkas.add(kafka); + partitions.computeIfAbsent(packetId, ArrayList::new).add(new KafkaTopicPartition(topic, partitionId)); + offsetCommit.doKafkaData(traceId, authorization, 0, DATA_FLAG_COMPLETE, offsetCommitEx); + } + + private void setHashKey( + KafkaKeyFW.Builder builder) + { + if (hashKey != null) { - state = MqttKafkaState.closeInitial(state); + builder.set(hashKey); + } + } - doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, extension); + private void addFiltersHeader( + String16FW responseTopic) + { + final DirectBuffer responseBuffer = responseTopic.value(); + final int capacity = responseBuffer.capacity(); + + int offset = 0; + int matchAt = 0; + while (offset >= 0 && offset < capacity && matchAt != -1) + { + matchAt = indexOfByte(responseBuffer, offset, capacity, SLASH_BYTE); + if (matchAt != -1) + { + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, matchAt - offset); + offset = matchAt + 1; + } } + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, capacity - offset); } } @@ -814,106 +974,78 @@ private static boolean hasPublishFlagRetained( } - final class KafkaMessagesProxy + public abstract class KafkaProxy { - private MessageConsumer kafka; - private final long originId; - private final long routedId; - 
private final long initialId; - private final long replyId; - private final MqttPublishProxy delegate; - private final String16FW topic; - - private int state; - - private long initialSeq; - private long initialAck; - private int initialMax; - - private long replySeq; - private long replyAck; - private int replyMax; - private int replyPad; - - private KafkaMessagesProxy( + protected MessageConsumer kafka; + protected long mqttAffinity; + protected final long originId; + protected final long routedId; + protected final long initialId; + protected final long replyId; + protected final String16FW topic; + protected final String topicString; + + protected MqttPublishProxy delegate; + protected int state; + + protected long initialSeq; + protected long initialAck; + protected int initialMax; + protected int initialPad; + + protected long replySeq; + protected long replyAck; + protected int replyMax; + protected int replyPad; + protected int qos2PartitionId = -1; + + public KafkaProxy( long originId, long routedId, + long mqttAffinity, MqttPublishProxy delegate, String16FW topic) { this.originId = originId; this.routedId = routedId; + this.mqttAffinity = mqttAffinity; this.delegate = delegate; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); this.topic = topic; - - } - - private void doKafkaBegin( - long traceId, - long authorization, - long affinity, - int qos) - { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, topic, qos); + this.topicString = topic.asString().intern(); } - private void doKafkaData( + abstract void doKafkaData( long traceId, long authorization, long budgetId, int reserved, int flags, OctetsFW payload, - Flyweight extension) - { - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - initialSeq += reserved; - - assert initialSeq <= initialAck + initialMax; - } + Flyweight extension); - private void doKafkaEnd( + public void sendKafkaFinData( long traceId, - long sequence, long authorization) { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_FIN, EMPTY_OCTETS, EMPTY_OCTETS); } + } - private void doKafkaAbort( - long traceId, - long authorization) + public final class KafkaMessagesProxy extends KafkaProxy + { + public KafkaMessagesProxy( + long originId, + long routedId, + long mqttAffinity, + MqttPublishProxy delegate, + String16FW topic) { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } + super(originId, routedId, mqttAffinity, delegate, topic); } - private void onKafkaMessage( + public void onKafkaMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -974,6 +1106,7 @@ private 
void onKafkaBegin( assert replyAck <= replySeq; delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0, 0); } private void onKafkaData( @@ -996,6 +1129,42 @@ private void onKafkaData( delegate.doMqttAbort(traceId, authorization); } + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; + final KafkaMergedFlushExFW kafkaMergedFlushEx = + kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaFlushExFW.KIND_MERGED ? kafkaFlushEx.merged() : null; + final KafkaMergedProduceFlushExFW kafkaMergedProduceFlushEx = kafkaMergedFlushEx != null && + kafkaMergedFlushEx.kind() == KafkaMergedFlushExFW.KIND_PRODUCE ? kafkaMergedFlushEx.produce() : null; + + if (kafkaMergedProduceFlushEx != null) + { + this.qos2PartitionId = kafkaMergedProduceFlushEx.partitionId(); + + if (!delegate.retainAvailable || delegate.retained.qos2PartitionId != -1) + { + delegate.doMqttWindow(traceId, authorization, 0, 0, 0); + } + } + } + private void onKafkaEnd( EndFW end) { @@ -1015,24 +1184,351 @@ private void onKafkaEnd( delegate.doMqttEnd(traceId, authorization); } - private void onKafkaFlush( - FlushFW flush) + private void onKafkaAbort( + AbortFW abort) { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); - final long budgetId = flush.budgetId(); - final int reserved = flush.reserved(); + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= initialAck; + + initialAck = acknowledge; + + assert initialAck <= initialSeq; + + final OctetsFW extension = reset.extension(); + final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); + final KafkaResetExFW kafkaResetEx = + resetEx != null && resetEx.typeId() == kafkaTypeId ? 
extension.get(kafkaResetExRO::tryWrap) : null; + + Flyweight mqttResetEx = EMPTY_OCTETS; + if (kafkaResetEx != null) + { + mqttResetEx = mqttResetExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(mqttTypeId) + .reasonCode(MQTT_REASON_CODES.get(kafkaResetEx.error())) + .build(); + } + + delegate.doMqttReset(traceId, mqttResetEx); + } + + private void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); + + assert acknowledge <= sequence; + assert acknowledge >= initialAck; + assert maximum >= initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialPad = padding; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + if (wasOpen || delegate.qos < MqttQoS.EXACTLY_ONCE.value()) + { + delegate.doMqttWindow(traceId, authorization, budgetId, padding, capabilities); + } + else + { + final KafkaKeyFW hashKey = delegate.hashKey != null ? delegate.hashKey : delegate.key; + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(p -> p.hashKey(hashKey))) + .build(); + doKafkaFlush(traceId, authorization, 0, kafkaFlushEx); + } + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + int qos) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + + if (!MqttKafkaState.initialOpening(state)) + { + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, topic, qos); + } + } + + void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + KafkaFlushExFW extension) + { + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, initialPad, extension); + } + + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void 
doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + + final class KafkaRetainedProxy extends KafkaProxy + { + KafkaRetainedProxy( + long originId, + long routedId, + long mqttAffinity, + MqttPublishProxy delegate, + String16FW topic) + { + super(originId, routedId, mqttAffinity, delegate, topic); + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); assert acknowledge <= sequence; assert sequence >= replySeq; + assert acknowledge >= replyAck; replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + doKafkaReset(traceId); + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + + private void onKafkaFlush( + FlushFW flush) + { + 
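// QoS 2: the merged produce FLUSH names the partition assigned to this
// client's retained publishes; the MQTT window is opened only once the
// messages stream has reported its partition as well.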
final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; assert replyAck <= replySeq; - delegate.doMqttFlush(traceId, authorization, budgetId, reserved); + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; + final KafkaMergedFlushExFW kafkaMergedFlushEx = + kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaFlushExFW.KIND_MERGED ? kafkaFlushEx.merged() : null; + final KafkaMergedProduceFlushExFW kafkaMergedProduceFlushEx = kafkaMergedFlushEx != null && + kafkaMergedFlushEx.kind() == KafkaMergedFlushExFW.KIND_PRODUCE ? kafkaMergedFlushEx.produce() : null; + + if (kafkaMergedProduceFlushEx != null) + { + this.qos2PartitionId = kafkaMergedProduceFlushEx.partitionId(); + + if (delegate.messages.qos2PartitionId != -1) + { + delegate.doMqttWindow(traceId, authorization, 0, 0, 0); + } + } } private void onKafkaAbort( @@ -1054,6 +1550,38 @@ private void onKafkaAbort( delegate.doMqttAbort(traceId, authorization); } + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= initialAck; + + initialAck = acknowledge; + + assert initialAck <= initialSeq; + + final OctetsFW extension = reset.extension(); + final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); + final KafkaResetExFW kafkaResetEx = + resetEx != null && resetEx.typeId() == kafkaTypeId ? extension.get(kafkaResetExRO::tryWrap) : null; + + Flyweight mqttResetEx = EMPTY_OCTETS; + if (kafkaResetEx != null) + { + mqttResetEx = mqttResetExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(mqttTypeId) + .reasonCode(MQTT_REASON_CODES.get(kafkaResetEx.error())) + .build(); + } + + delegate.doMqttReset(traceId, mqttResetEx); + } + private void onKafkaWindow( WindowFW window) { @@ -1065,50 +1593,114 @@ private void onKafkaWindow( final long budgetId = window.budgetId(); final int padding = window.padding(); final int capabilities = window.capabilities(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - assert maximum >= delegate.initialMax; + assert acknowledge >= initialAck; + assert maximum >= initialMax; initialAck = acknowledge; + initialPad = padding; initialMax = maximum; state = MqttKafkaState.openInitial(state); assert initialAck <= initialSeq; - delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + + if (wasOpen) + { + delegate.doMqttWindow(traceId, authorization, budgetId, padding, capabilities); + } + else if (delegate.qos < MqttQoS.EXACTLY_ONCE.value()) + { + delegate.doMqttWindow(traceId, authorization, budgetId, padding, capabilities); + } + else + { + final KafkaKeyFW hashKey = delegate.hashKey != null ? 
delegate.hashKey : delegate.key; + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(p -> p.hashKey(hashKey))) + .build(); + doKafkaFlush(traceId, authorization, 0, kafkaFlushEx); + } } - private void onKafkaReset( - ResetFW reset) + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + int qos) { - final long sequence = reset.sequence(); - final long acknowledge = reset.acknowledge(); - final long traceId = reset.traceId(); + initialSeq = 0; + initialAck = 0; + initialMax = delegate.initialMax; + + if (!MqttKafkaState.initialOpening(state)) + { + + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, topic, qos); + } + } + + void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + KafkaFlushExFW extension) + { + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, initialPad, extension); - assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; + initialSeq += initialPad; - delegate.initialAck = acknowledge; + assert initialSeq <= initialAck + initialMax; + } - assert delegate.initialAck <= delegate.initialSeq; + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); - final OctetsFW extension = reset.extension(); - final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); - final KafkaResetExFW kafkaResetEx = - resetEx != null && resetEx.typeId() == kafkaTypeId ? 
extension.get(kafkaResetExRO::tryWrap) : null; + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } - Flyweight mqttResetEx = EMPTY_OCTETS; - if (kafkaResetEx != null) + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) { - mqttResetEx = mqttResetExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(mqttTypeId) - .reasonCode(MQTT_REASON_CODES.get(kafkaResetEx.error())) - .build(); - } + state = MqttKafkaState.closeInitial(state); - delegate.doMqttReset(traceId, mqttResetEx); + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } } private void doKafkaReset( @@ -1138,7 +1730,7 @@ private void doKafkaWindow( } } - final class KafkaRetainedProxy + private final class KafkaOffsetCommitStream { private MessageConsumer kafka; private final long originId; @@ -1146,87 +1738,59 @@ final class KafkaRetainedProxy private final long initialId; private final long replyId; private final MqttPublishProxy delegate; - private final String16FW topic; + private final Queue unfinishedKafkas; private int state; private long initialSeq; private long initialAck; - private int initialPad; private int initialMax; private long replySeq; private long replyAck; private int replyMax; private int replyPad; + private boolean retainAvailable; - private KafkaRetainedProxy( + + private KafkaOffsetCommitStream( long originId, long routedId, - MqttPublishProxy delegate, - String16FW topic) + MqttPublishProxy delegate) { this.originId = originId; this.routedId = routedId; this.delegate = delegate; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); - this.topic = topic; + this.unfinishedKafkas = new LinkedList<>(); } private void doKafkaBegin( long traceId, long authorization, long affinity, - int qos) + boolean retainAvailable) { - initialSeq = 0; - initialAck = 0; + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; initialMax = delegate.initialMax; state = MqttKafkaState.openingInitial(state); + this.retainAvailable = retainAvailable; - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, topic, qos); - } - - private void doKafkaData( - long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) - { - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - initialSeq += reserved; - - assert initialSeq <= initialAck + initialMax; - } - - private void doKafkaFlush( - long traceId, - long authorization, - long budgetId, - KafkaFlushExFW extension) - { - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, initialPad, extension); - - initialSeq += initialPad; - - assert initialSeq <= initialAck + initialMax; + kafka = newOffsetCommitStream(this::onOffsetCommitMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity, delegate.group); } private void doKafkaEnd( long traceId, - long sequence, long authorization) { if (!MqttKafkaState.initialClosed(state)) { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; state = 
MqttKafkaState.closeInitial(state); doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); @@ -1239,13 +1803,16 @@ private void doKafkaAbort( { if (!MqttKafkaState.initialClosed(state)) { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; state = MqttKafkaState.closeInitial(state); doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); } } - private void onKafkaMessage( + private void onOffsetCommitMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -1257,9 +1824,9 @@ private void onKafkaMessage( final BeginFW begin = beginRO.wrap(buffer, index, index + length); onKafkaBegin(begin); break; - case DataFW.TYPE_ID: - final DataFW data = dataRO.wrap(buffer, index, index + length); - onKafkaData(data); + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); @@ -1269,14 +1836,6 @@ private void onKafkaMessage( final AbortFW abort = abortRO.wrap(buffer, index, index + length); onKafkaAbort(abort); break; - case FlushFW.TYPE_ID: - final FlushFW flush = flushRO.wrap(buffer, index, index + length); - onKafkaFlush(flush); - break; - case WindowFW.TYPE_ID: - final WindowFW window = windowRO.wrap(buffer, index, index + length); - onKafkaWindow(window); - break; case ResetFW.TYPE_ID: final ResetFW reset = resetRO.wrap(buffer, index, index + length); onKafkaReset(reset); @@ -1305,27 +1864,43 @@ private void onKafkaBegin( assert replyAck <= replySeq; - delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0, 0); } - private void onKafkaData( - DataFW data) + private void onKafkaWindow( + WindowFW window) { - final long sequence = data.sequence(); - final long acknowledge = data.acknowledge(); - final long traceId = data.traceId(); - final long authorization = data.authorization(); - final long budgetId = data.budgetId(); - final int reserved = data.reserved(); + final long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); assert acknowledge <= sequence; - assert sequence >= replySeq; + assert acknowledge >= initialAck; + assert maximum >= initialMax; - replySeq = sequence + reserved; + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); - assert replyAck <= replySeq; - doKafkaReset(traceId); - delegate.doMqttAbort(traceId, authorization); + assert initialAck <= initialSeq; + + if (!wasOpen) + { + delegate.messages.doKafkaBegin(traceId, authorization, 0, MqttQoS.EXACTLY_ONCE.value()); + if (retainAvailable) + { + delegate.retained.doKafkaBegin(traceId, authorization, 0, MqttQoS.EXACTLY_ONCE.value()); + } + } + else + { + final MqttKafkaPublishFactory.KafkaProxy kafka = unfinishedKafkas.remove(); + kafka.sendKafkaFinData(traceId, authorization); + } } private void onKafkaEnd( @@ -1333,8 +1908,6 @@ private void onKafkaEnd( { final long sequence = end.sequence(); final long acknowledge = end.acknowledge(); - final long traceId = end.traceId(); - final long authorization = end.authorization(); assert acknowledge <= sequence; assert sequence >= 
replySeq; @@ -1343,28 +1916,6 @@ private void onKafkaEnd( state = MqttKafkaState.closeReply(state); assert replyAck <= replySeq; - - delegate.doMqttEnd(traceId, authorization); - } - - private void onKafkaFlush( - FlushFW flush) - { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); - final long budgetId = flush.budgetId(); - final int reserved = flush.reserved(); - - assert acknowledge <= sequence; - assert sequence >= replySeq; - - replySeq = sequence; - - assert replyAck <= replySeq; - - delegate.doMqttFlush(traceId, authorization, budgetId, reserved); } private void onKafkaAbort( @@ -1386,32 +1937,6 @@ private void onKafkaAbort( delegate.doMqttAbort(traceId, authorization); } - private void onKafkaWindow( - WindowFW window) - { - final long sequence = window.sequence(); - final long acknowledge = window.acknowledge(); - final int maximum = window.maximum(); - final long authorization = window.authorization(); - final long traceId = window.traceId(); - final long budgetId = window.budgetId(); - final int padding = window.padding(); - final int capabilities = window.capabilities(); - - assert acknowledge <= sequence; - assert acknowledge >= initialAck; - assert maximum >= initialMax; - - initialAck = acknowledge; - initialPad = padding; - initialMax = maximum; - state = MqttKafkaState.openInitial(state); - - assert initialAck <= initialSeq; - - delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); - } - private void onKafkaReset( ResetFW reset) { @@ -1420,28 +1945,21 @@ private void onKafkaReset( final long traceId = reset.traceId(); assert acknowledge <= sequence; - assert acknowledge >= initialAck; - - initialAck = acknowledge; - - assert initialAck <= initialSeq; - final OctetsFW extension = reset.extension(); - final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); - final KafkaResetExFW kafkaResetEx = - resetEx != null && resetEx.typeId() == kafkaTypeId ? 
extension.get(kafkaResetExRO::tryWrap) : null; + delegate.doMqttReset(traceId, EMPTY_OCTETS); + } - Flyweight mqttResetEx = EMPTY_OCTETS; - if (kafkaResetEx != null) - { - mqttResetEx = mqttResetExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(mqttTypeId) - .reasonCode(MQTT_REASON_CODES.get(kafkaResetEx.error())) - .build(); - } + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int flags, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, 0, EMPTY_OCTETS, extension); - delegate.doMqttReset(traceId, mqttResetEx); + assert initialSeq <= initialAck + initialMax; } private void doKafkaReset( @@ -1471,6 +1989,30 @@ private void doKafkaWindow( } } + private String16FW offsetMetadataToString( + KafkaOffsetMetadata metadata) + { + mqttOffsetMetadataRW.wrap(offsetBuffer, 0, offsetBuffer.capacity()); + mqttOffsetMetadataRW.version(OFFSET_METADATA_VERSION); + mqttOffsetMetadataRW.producerId(metadata.producerId); + mqttOffsetMetadataRW.producerEpoch(metadata.producerEpoch); + + if (metadata.packetIds != null) + { + metadata.packetIds.forEach(p -> mqttOffsetMetadataRW.appendPacketIds(p.shortValue())); + } + final MqttPublishOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRW.build(); + return new String16FW(BitUtil.toHex(offsetMetadata.buffer().byteArray(), + offsetMetadata.offset(), offsetMetadata.limit())); + } + + private static long offsetKey( + String topic, + int partitionId) + { + final int topicHashCode = System.identityHashCode(topic.intern()); + return ((long) topicHashCode << 32) | (partitionId & 0xFFFFFFFFL); + } private void doBegin( MessageConsumer receiver, @@ -1661,6 +2203,51 @@ private MessageConsumer newKafkaStream( return receiver; } + private MessageConsumer newOffsetCommitStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + KafkaGroup group) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetCommit(o -> o + .groupId(group.groupId) + .memberId(group.memberId) + .instanceId(group.instanceId) + .host(group.host) + .port(group.port)) + .build(); + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + private void doWindow( MessageConsumer sender, long originId, diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishMetadata.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishMetadata.java new file mode 100644 index 0000000000..30bd6d28ef --- /dev/null +++ 
b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishMetadata.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ *   https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream;
+
+import java.util.List;
+import java.util.function.IntConsumer;
+
+import org.agrona.BitUtil;
+import org.agrona.MutableDirectBuffer;
+import org.agrona.collections.Int2ObjectHashMap;
+import org.agrona.collections.IntArrayList;
+import org.agrona.collections.Long2LongHashMap;
+import org.agrona.collections.Long2ObjectHashMap;
+import org.agrona.concurrent.UnsafeBuffer;
+
+import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishOffsetMetadataFW;
+import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW;
+
+public class MqttKafkaPublishMetadata
+{
+    final Long2ObjectHashMap<KafkaOffsetMetadata> offsets;
+    final Int2ObjectHashMap<List<KafkaTopicPartition>> partitions;
+    final Long2LongHashMap leaderEpochs;
+
+    KafkaGroup group;
+
+    public MqttKafkaPublishMetadata(
+        Long2ObjectHashMap<KafkaOffsetMetadata> offsets,
+        Int2ObjectHashMap<List<KafkaTopicPartition>> partitions,
+        Long2LongHashMap leaderEpochs)
+    {
+        this.offsets = offsets;
+        this.partitions = partitions;
+        this.leaderEpochs = leaderEpochs;
+    }
+
+    public static final class KafkaGroup
+    {
+        public final String instanceId;
+        public final String groupId;
+        public final String memberId;
+        public final String host;
+        public final int port;
+        public final int generationId;
+
+        KafkaGroup(
+            String instanceId,
+            String groupId,
+            String memberId,
+            String host,
+            int port,
+            int generationId)
+        {
+            this.instanceId = instanceId;
+            this.groupId = groupId;
+            this.memberId = memberId;
+            this.host = host;
+            this.port = port;
+            this.generationId = generationId;
+        }
+    }
+
+    public static final class KafkaTopicPartition
+    {
+        public final String topic;
+        public final int partitionId;
+
+        KafkaTopicPartition(
+            String topic,
+            int partitionId)
+        {
+            this.topic = topic;
+            this.partitionId = partitionId;
+        }
+    }
+
+    public static final class KafkaOffsetMetadata
+    {
+        public final long producerId;
+        public final short producerEpoch;
+        public final IntArrayList packetIds;
+
+        public long sequence;
+
+        KafkaOffsetMetadata(
+            long producerId,
+            short producerEpoch)
+        {
+            this(producerId, producerEpoch, new IntArrayList());
+        }
+
+        KafkaOffsetMetadata(
+            long producerId,
+            short producerEpoch,
+            IntArrayList packetIds)
+        {
+            this.sequence = 1;
+            this.producerId = producerId;
+            this.producerEpoch = producerEpoch;
+            this.packetIds = packetIds;
+        }
+    }
+
+    public static final class KafkaOffsetMetadataHelper
+    {
+        private static final int OFFSET_METADATA_VERSION = 1;
+
+        private final MqttPublishOffsetMetadataFW mqttOffsetMetadataRO = new MqttPublishOffsetMetadataFW();
+        private final MqttPublishOffsetMetadataFW.Builder mqttOffsetMetadataRW = new MqttPublishOffsetMetadataFW.Builder();
+        private final MutableDirectBuffer offsetBuffer;
+
+        KafkaOffsetMetadataHelper(
+            MutableDirectBuffer offsetBuffer)
+        {
+            this.offsetBuffer = offsetBuffer;
+        }
+
+        public KafkaOffsetMetadata stringToOffsetMetadata(
+            String16FW metadata)
+        {
+            final IntArrayList packetIds = new IntArrayList();
+            UnsafeBuffer buffer = new UnsafeBuffer(BitUtil.fromHex(metadata.asString()));
+            final MqttPublishOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRO.wrap(buffer, 0, buffer.capacity());
+            if (offsetMetadata.packetIds() != null)
+            {
+                offsetMetadata.packetIds().forEachRemaining((IntConsumer) packetIds::add);
+            }
+            return new KafkaOffsetMetadata(offsetMetadata.producerId(), offsetMetadata.producerEpoch(), packetIds);
+        }
+
+        public String16FW offsetMetadataToString(
+            KafkaOffsetMetadata metadata)
+        {
+            mqttOffsetMetadataRW.wrap(offsetBuffer, 0, offsetBuffer.capacity());
+            mqttOffsetMetadataRW.version(OFFSET_METADATA_VERSION);
+            mqttOffsetMetadataRW.producerId(metadata.producerId);
+            mqttOffsetMetadataRW.producerEpoch(metadata.producerEpoch);
+
+            if (metadata.packetIds != null)
+            {
+                metadata.packetIds.forEach(p -> mqttOffsetMetadataRW.appendPacketIds(p.shortValue()));
+            }
+            final MqttPublishOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRW.build();
+            return new String16FW(BitUtil.toHex(offsetMetadata.buffer().byteArray(),
+                offsetMetadata.offset(), offsetMetadata.limit()));
+        }
+    }
+}
diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
index 4d410d950d..4d371e38ed 100644
--- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
+++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
@@ -19,24 +19,30 @@
 import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
 import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID;
 import static java.lang.System.currentTimeMillis;
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.agrona.BitUtil.SIZE_OF_INT;
 import static org.agrona.BitUtil.SIZE_OF_LONG;
 
 import java.nio.ByteOrder;
-import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.function.LongFunction;
 import java.util.function.LongSupplier;
 import java.util.function.LongUnaryOperator;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 
 import org.agrona.DirectBuffer;
 import org.agrona.MutableDirectBuffer;
 import org.agrona.collections.Int2IntHashMap;
 import org.agrona.collections.Int2ObjectHashMap;
+import org.agrona.collections.IntArrayQueue;
 import org.agrona.collections.IntHashSet;
+import org.agrona.collections.Long2LongHashMap;
 import org.agrona.collections.Long2ObjectHashMap;
 import org.agrona.collections.LongArrayList;
 import org.agrona.collections.Object2LongHashMap;
@@ -48,6 +54,10 @@
 import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaBindingConfig;
 import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaHeaderHelper;
 import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaRouteConfig;
+import
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaGroup; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaOffsetMetadata; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaOffsetMetadataHelper; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaPublishMetadata.KafkaTopicPartition; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaAckMode; @@ -57,6 +67,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaKeyFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetType; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaPartitionFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttExpirySignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormatFW; @@ -80,16 +91,22 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaInitProducerIdBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMetaDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaOffsetFetchDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaTopicPartitionOffsetFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttResetExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttServerCapabilities; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionDataKind; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.SignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.WindowFW; @@ -121,6 +138,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final OctetsFW EXPIRY_SIGNAL_NAME_OCTETS = new 
OctetsFW().wrap(EXPIRY_SIGNAL_NAME.value(), 0, EXPIRY_SIGNAL_NAME.length()); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); + private static final String16FW DEFAULT_REASON = new String16FW(null, UTF_8); private static final int DATA_FLAG_INIT = 0x02; private static final int DATA_FLAG_FIN = 0x01; private static final int DATA_FLAG_COMPLETE = 0x03; @@ -133,9 +151,11 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final int WILDCARD_AVAILABLE_MASK = 1 << MqttServerCapabilities.WILDCARD.value(); private static final int SUBSCRIPTION_IDS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SUBSCRIPTION_IDS.value(); private static final int SHARED_SUBSCRIPTIONS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SHARED_SUBSCRIPTIONS.value(); + private static final int REDIRECT_AVAILABLE_MASK = 1 << MqttServerCapabilities.REDIRECT.value(); private static final byte MQTT_KAFKA_MAX_QOS = 2; private static final int MQTT_KAFKA_CAPABILITIES = RETAIN_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK | SUBSCRIPTION_IDS_AVAILABLE_MASK; + public static final String GROUPID_SESSION_SUFFIX = "session"; public static final Int2IntHashMap MQTT_REASON_CODES; public static final Int2ObjectHashMap MQTT_REASONS; @@ -189,12 +209,15 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); + private final MqttFlushExFW mqttFlushExRO = new MqttFlushExFW(); private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); private final MqttSessionSignalFW mqttSessionSignalRO = new MqttSessionSignalFW(); private final MqttWillMessageFW mqttWillRO = new MqttWillMessageFW(); private final OctetsFW payloadRO = new OctetsFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); + private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); + private final MqttFlushExFW.Builder mqttFlushExRW = new MqttFlushExFW.Builder(); private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); @@ -237,6 +260,8 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final boolean willAvailable; private final int reconnectDelay; private final Int2ObjectHashMap qosLevels; + private final Long2ObjectHashMap clientMetadata; + private final KafkaOffsetMetadataHelper offsetMetadataHelper; private String serverRef; private int reconnectAttempt; @@ -246,7 +271,8 @@ public MqttKafkaSessionFactory( MqttKafkaConfiguration config, EngineContext context, InstanceId instanceId, - LongFunction supplyBinding) + LongFunction supplyBinding, + Long2ObjectHashMap clientMetadata) { this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); @@ -281,6 +307,8 @@ public MqttKafkaSessionFactory( this.qosLevels.put(0, new String16FW("0")); this.qosLevels.put(1, new String16FW("1")); this.qosLevels.put(2, new String16FW("2")); + this.clientMetadata = clientMetadata; + this.offsetMetadataHelper = new KafkaOffsetMetadataHelper(new UnsafeBuffer(new byte[context.writeBuffer().capacity()])); } @Override @@ -296,6 +324,7 @@ public MessageConsumer newStream( final long routedId = begin.routedId(); final long initialId = begin.streamId(); final long authorization = begin.authorization(); + final 
long affinity = begin.affinity(); final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); @@ -307,8 +336,9 @@ public MessageConsumer newStream( { final long resolvedId = resolved.id; final String16FW sessionTopic = binding.sessionsTopic(); - newStream = new MqttSessionProxy(mqtt, originId, routedId, initialId, resolvedId, - binding.id, sessionTopic)::onMqttMessage; + final MqttSessionProxy proxy = new MqttSessionProxy(mqtt, originId, routedId, initialId, resolvedId, + binding.id, sessionTopic); + newStream = proxy::onMqttMessage; } return newStream; @@ -320,6 +350,7 @@ public void onAttached( { MqttKafkaBindingConfig binding = supplyBinding.apply(bindingId); this.serverRef = binding.options.serverRef; + if (willAvailable && coreIndex == 0) { Optional<MqttKafkaRouteConfig> route = binding.routes.stream().findFirst(); @@ -349,15 +380,24 @@ public void onDetached( private final class MqttSessionProxy { private final MessageConsumer mqtt; + private final long resolvedId; private final long originId; private final long routedId; private final long initialId; private final long replyId; private final String16FW sessionId; private final String16FW sessionsTopic; + private final List<KafkaMetaStream> metas; + private final List<KafkaOffsetFetchStream> offsetFetches; + private final List<KafkaTopicPartition> initializablePartitions; + private final Long2LongHashMap leaderEpochs; + private final IntArrayQueue unackedPacketIds; + private String lifetimeId; private KafkaSessionStream session; private KafkaGroupStream group; + private KafkaInitProducerStream producerInit; + private KafkaOffsetCommitStream offsetCommit; private int state; private long initialSeq; @@ -371,12 +411,26 @@ private final class MqttSessionProxy private String16FW clientId; private String16FW clientIdMigrate; + private String memberId; + private String groupInstanceId; + private String groupHost; + private int groupPort; + private int generationId; + private int sessionExpiryMillis; private int sessionFlags; private int willPadding; private int sessionPadding; private String willId; private int delay; + private boolean redirect; + private int publishQosMax; + private int unfetchedKafkaTopics; + private MqttKafkaPublishMetadata metadata; + private final Set<String16FW> messagesTopics; + private final String16FW retainedTopic; + private long producerId; + private short producerEpoch; private MqttSessionProxy( MessageConsumer mqtt, @@ -390,11 +444,23 @@ private MqttSessionProxy( this.mqtt = mqtt; this.originId = originId; this.routedId = routedId; + this.resolvedId = resolvedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); this.session = new KafkaFetchWillSignalStream(originId, resolvedId, this); this.sessionsTopic = sessionsTopic; this.sessionId = new String16FW(sessionIds.get(bindingId)); + this.leaderEpochs = new Long2LongHashMap(-2); + this.metas = new ArrayList<>(); + this.offsetFetches = new ArrayList<>(); + this.initializablePartitions = new ArrayList<>(); + final MqttKafkaBindingConfig binding = supplyBinding.apply(bindingId); + final String16FW messagesTopic = binding.messagesTopic(); + this.retainedTopic = binding.retainedTopic(); + this.messagesTopics = binding.routes.stream().map(r -> r.messages).collect(Collectors.toSet()); + this.messagesTopics.add(messagesTopic); + this.unfetchedKafkaTopics = messagesTopics.size() + 1; + this.unackedPacketIds = new IntArrayQueue(); } private void onMqttMessage( @@ -409,6 +475,10 @@ private void onMqttMessage( final BeginFW begin = beginRO.wrap(buffer, index, index + length); onMqttBegin(begin); break; + case FlushFW.TYPE_ID: +
final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onMqttFlush(flush); + break; case DataFW.TYPE_ID: final DataFW data = dataRO.wrap(buffer, index, index + length); onMqttData(data); @@ -463,6 +533,8 @@ private void onMqttBegin( sessionExpiryMillis = (int) SECONDS.toMillis(mqttSessionBeginEx.expiry()); sessionFlags = mqttSessionBeginEx.flags(); + redirect = hasRedirectCapability(mqttSessionBeginEx.capabilities()); + publishQosMax = mqttSessionBeginEx.publishQosMax(); if (!isSetWillFlag(sessionFlags) || isSetCleanStart(sessionFlags)) { @@ -479,6 +551,12 @@ private void onMqttBegin( willPadding += expirySignalSize; session.doKafkaBeginIfNecessary(traceId, authorization, affinity); + if (publishQosMax == 2) + { + doMqttWindow(authorization, traceId, 0, 0, 0); + this.metadata = new MqttKafkaPublishMetadata(new Long2ObjectHashMap<>(), new Int2ObjectHashMap<>(), leaderEpochs); + clientMetadata.put(affinity, metadata); + } } private void onMqttData( @@ -534,7 +612,7 @@ private void onMqttData( { String16FW willSignalKey = new String16FW.Builder() .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -586,7 +664,7 @@ private void onMqttWillData( { lifetimeId = supplyLifetimeId.get(); } - this.willId = supplyWillId.get(); + this.willId = supplyWillId.get(); MqttWillMessageFW will = mqttWillRO.tryWrap(buffer, offset, limit); this.delay = (int) Math.min(SECONDS.toMillis(will.delay()), sessionExpiryMillis); @@ -614,7 +692,7 @@ private void onMqttWillData( int length = kafkaPayload.sizeof() + payloadSize; String16FW key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) - .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, UTF_8).build(); Flyweight kafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) @@ -662,35 +740,33 @@ private void onMqttStateData( sessionPadding, flags, kafkaPayload, kafkaDataEx); } - private void doFlushProduceAndFetchWithFilter( - long traceId, - long authorization, - long budgetId) + private void onMqttFlush( + FlushFW flush) { - final KafkaFlushExFW kafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.fetch(f -> - { - f.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); - f.filtersItem(fi -> fi.conditionsItem(ci -> - ci.key(kb -> kb.length(clientId.length()) - .value(clientId.value(), 0, clientId.length())))); - f.filtersItem(fi -> - { - fi.conditionsItem(ci -> - ci.key(kb -> kb.length(clientIdMigrate.length()) - .value(clientIdMigrate.value(), 0, clientIdMigrate.length()))); - fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(SENDER_ID_NAME.length()) - .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) - .valueLen(sessionId.length()) - .value(sessionId.value(), 0, sessionId.length()))))); - }); - })) - .build(); + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); - session.doKafkaFlush(traceId, authorization, budgetId, 0, kafkaFlushEx); + assert acknowledge <= sequence; + 
assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + final OctetsFW extension = flush.extension(); + final MqttFlushExFW mqttFlushEx = extension.get(mqttFlushExRO::tryWrap); + + assert mqttFlushEx.kind() == MqttFlushExFW.KIND_SESSION; + final MqttSessionFlushExFW mqttPublishFlushEx = mqttFlushEx.session(); + + final int packetId = mqttPublishFlushEx.packetId(); + + final List<KafkaTopicPartition> partitions = metadata.partitions.get(packetId); + partitions.forEach(partition -> + doCommitOffsetComplete(traceId, authorization, partition.topic, partition.partitionId, packetId)); } private void onMqttEnd( @@ -713,7 +789,7 @@ private void onMqttEnd( { // Cleanup will message + will signal String16FW key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) - .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, UTF_8).build(); Flyweight kafkaWillDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -732,7 +808,7 @@ private void onMqttEnd( String16FW willSignalKey = new String16FW.Builder() .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -771,6 +847,18 @@ private void onMqttEnd( { group.doKafkaEnd(traceId, authorization); } + + metas.forEach(m -> m.doKafkaEnd(traceId, authorization)); + offsetFetches.forEach(o -> o.doKafkaEnd(traceId, authorization)); + + if (producerInit != null) + { + producerInit.doKafkaEnd(traceId, authorization); + } + if (offsetCommit != null) + { + offsetCommit.doKafkaEnd(traceId, authorization); + } } private void onMqttAbort( @@ -809,6 +897,16 @@ private void onMqttAbort( { group.doKafkaAbort(traceId, authorization); } + metas.forEach(m -> m.doKafkaAbort(traceId, authorization)); + offsetFetches.forEach(o -> o.doKafkaAbort(traceId, authorization)); + if (producerInit != null) + { + producerInit.doKafkaAbort(traceId, authorization); + } + if (offsetCommit != null) + { + offsetCommit.doKafkaAbort(traceId, authorization); + } } private void onMqttReset( @@ -835,6 +933,17 @@ private void onMqttReset( { group.doKafkaReset(traceId); } + + metas.forEach(m -> m.doKafkaReset(traceId)); + offsetFetches.forEach(o -> o.doKafkaReset(traceId)); + if (producerInit != null) + { + producerInit.doKafkaReset(traceId); + } + if (offsetCommit != null) + { + offsetCommit.doKafkaReset(traceId); + } } private void onMqttWindow( @@ -925,6 +1034,26 @@ private void doMqttData( assert replySeq <= replyAck + replyMax; } + private void doMqttFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + int packetId) + { + if (!metadata.partitions.containsKey(packetId)) + { + final MqttFlushExFW mqttFlushEx = + mqttFlushExRW.wrap(extBuffer, FlushFW.FIELD_OFFSET_EXTENSION, extBuffer.capacity()) + .typeId(mqttTypeId) + .session(p -> p.packetId(packetId)) + .build(); + + doFlush(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, + budgetId, reserved, mqttFlushEx); + } + } + private void doMqttAbort( long traceId, long authorization) @@ -951,6 +1080,18 @@ private void doMqttEnd( } } + private void doMqttReset( + long traceId, + Flyweight extension) + {
+ if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, extension); + } + } + private void doMqttWindow( long authorization, long traceId, @@ -965,133 +1106,409 @@ private void doMqttWindow( traceId, authorization, budgetId, willPadding, 0, capabilities); } - private void doMqttReset( + private void openMetaStreams( long traceId, - Flyweight extension) + long authorization) { - if (!MqttKafkaState.initialClosed(state)) + messagesTopics.forEach(t -> { - state = MqttKafkaState.closeInitial(state); + final KafkaMetaStream meta = + new KafkaMetaStream(originId, resolvedId, this, t, false); + metas.add(meta); + meta.doKafkaBegin(traceId, authorization, 0); + }); - doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, extension); - } + final KafkaMetaStream retainedMeta = + new KafkaMetaStream(originId, resolvedId, this, retainedTopic, true); + metas.add(retainedMeta); + retainedMeta.doKafkaBegin(traceId, authorization, 0); } - } - - public final class KafkaSignalStream - { - private MessageConsumer kafka; - private final long originId; - private final long routedId; - private final String16FW sessionsTopic; - private final String16FW messagesTopic; - private final String16FW retainedTopic; - private final Object2ObjectHashMap<String16FW, KafkaFetchWillStream> willFetchers; - private final Int2ObjectHashMap<String16FW> expiryClientIds; - private IntHashSet partitions; - private int state; + private void onSessionBegin( + long traceId, + long authorization, + long affinity) + { + if (publishQosMax != 2) + { + Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder + .flags(sessionFlags) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiryMillis)) + .subscribeQosMax(MQTT_KAFKA_MAX_QOS) + .capabilities(MQTT_KAFKA_CAPABILITIES) + .clientId(clientId)) + .build(); - private long initialId; - private long replyId; - private long replySeq; - private long replyAck; - private int replyMax; - private long reconnectAt; - private int decodeSlot = NO_SLOT; - private int decodeSlotOffset; + doMqttBegin(traceId, authorization, affinity, mqttBeginEx); + } + } - private KafkaSignalStream( - long originId, - long routedId, - String16FW sessionsTopic, - String16FW messagesTopic, - String16FW retainedTopic) + private void onSessionBecomesLeader( + long traceId, + long authorization, + int members, + String memberId, + int generationId) { - this.originId = originId; - this.routedId = routedId; - this.sessionsTopic = sessionsTopic; - this.messagesTopic = messagesTopic; - this.retainedTopic = retainedTopic; - this.willFetchers = new Object2ObjectHashMap<>(); - this.expiryClientIds = new Int2ObjectHashMap<>(); - this.partitions = new IntHashSet(); + if (members > 1) + { + session.sendMigrateSignal(traceId, authorization); + session.sendWillSignal(traceId, authorization); + session.doKafkaEnd(traceId, authorization); + group.doKafkaEnd(traceId, authorization); + } + else + { + session.doKafkaEnd(traceId, authorization); + if (publishQosMax < 2) + { + final long routedId = session.routedId; + session = new KafkaSessionStateProxy(originId, routedId, this); + session.doKafkaBeginIfNecessary(traceId, authorization, 0); + } + else + { + this.memberId = memberId; + this.generationId = generationId; + final String groupId = String.format("%s-%s", clientId.asString(), GROUPID_SESSION_SUFFIX);
+ this.metadata.group = new KafkaGroup(groupInstanceId, groupId, + memberId, groupHost, groupPort, generationId); + openMetaStreams(traceId, authorization); + } + } } - private void doKafkaBegin( - long timeMillis) + private void onPartitionsFetched( + long traceId, + long authorization, + String16FW topic, + Array32FW<KafkaPartitionFW> partitions, + KafkaMetaStream meta) { - this.reconnectAt = signaler.signalAt( - timeMillis, - SIGNAL_CONNECT_WILL_STREAM, - this::onSignalConnectWillStream); + doFetchOffsetMetadata(traceId, authorization, topic, partitions); + metas.remove(meta); } - private void doKafkaBegin( + private void onOffsetFetched( long traceId, long authorization, - long affinity) + String topic, + Array32FW<KafkaTopicPartitionOffsetFW> partitions, + KafkaOffsetFetchStream kafkaOffsetFetchStream) { - assert state == 0; + boolean initProducer = !partitions.anyMatch(p -> p.metadata().length() > 0); - state = MqttKafkaState.openingInitial(state); + partitions.forEach(partition -> + { + final long offset = partition.partitionOffset(); + final String16FW metadata = partition.metadata(); + final int partitionId = partition.partitionId(); + final long partitionKey = partitionKey(topic, partitionId); - kafka = newSignalStream(this::onSignalMessage, originId, routedId, initialId, 0, 0, 0, - traceId, authorization, affinity, sessionsTopic); + leaderEpochs.put(partitionKey, partition.leaderEpoch()); + + KafkaOffsetMetadata offsetMetadata; + if (!initProducer) + { + offsetMetadata = offsetMetadataHelper.stringToOffsetMetadata(metadata); + offsetMetadata.sequence = offset; + if (offsetCommit == null) + { + onProducerInit(traceId, authorization); + } + this.metadata.offsets.put(partitionKey, offsetMetadata); + offsetMetadata.packetIds.forEach(p -> this.metadata.partitions.computeIfAbsent(p, ArrayList::new) + .add(new KafkaTopicPartition(topic, partitionId))); + } + else + { + initializablePartitions.add(new KafkaTopicPartition(topic, partition.partitionId())); + } + }); + + unfetchedKafkaTopics--; + + if (unfetchedKafkaTopics == 0 && initProducer) + { + final long routedId = session.routedId; + producerInit = new KafkaInitProducerStream(originId, routedId, this); + producerInit.doKafkaBegin(traceId, authorization, 0); + } + else if (unfetchedKafkaTopics == 0) + { + doCreateSessionStream(traceId, authorization); + } + offsetFetches.remove(kafkaOffsetFetchStream); } - private void doKafkaEnd( + private void onGroupJoined( + String instanceId, + String host, + int port, + int sessionExpiryMillisInRange) + { + this.groupInstanceId = instanceId; + this.groupHost = host; + this.groupPort = port; + if (this.sessionExpiryMillis != sessionExpiryMillisInRange) + { + this.sessionExpiryMillis = sessionExpiryMillisInRange; + } + } + + private void onProducerInit( + long traceId, + long authorization, + long producerId, + short producerEpoch) + { + producerInit = null; + this.producerId = producerId; + this.producerEpoch = producerEpoch; + onProducerInit(traceId, authorization); + } + + private void onProducerInit( long traceId, long authorization) { - if (!MqttKafkaState.initialClosed(state)) - { - state = MqttKafkaState.closeInitial(state); + final long routedId = session.routedId; + offsetCommit = new KafkaOffsetCommitStream(originId, routedId, this, groupHost, groupPort); + offsetCommit.doKafkaBegin(traceId, authorization, 0); + } - doEnd(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + private void onOffsetCommitOpened( + long traceId, + long authorization, + long budgetId) + { + if (!initializablePartitions.isEmpty()) + {
+ initializablePartitions.forEach(kp -> + { + final long partitionKey = partitionKey(kp.topic, kp.partitionId); + final KafkaOffsetMetadata metadata = new KafkaOffsetMetadata(producerId, producerEpoch); + this.metadata.offsets.put(partitionKey, metadata); + Flyweight initialOffsetCommit = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetCommit(o -> o + .topic(kp.topic) + .progress(p -> p + .partitionId(kp.partitionId) + .partitionOffset(metadata.sequence) + .metadata(offsetMetadataHelper.offsetMetadataToString(metadata))) + .generationId(generationId) + .leaderEpoch((int) leaderEpochs.get(partitionKey))) + .build(); - signaler.cancel(reconnectAt); - reconnectAt = NO_CANCEL_ID; + offsetCommit.doKafkaData(traceId, authorization, budgetId, DATA_FLAG_COMPLETE, initialOffsetCommit); + }); } } - private void doKafkaAbort( + private void onOffsetCommitAck( long traceId, long authorization) { - if (!MqttKafkaState.initialClosed(state)) + if (initializablePartitions.isEmpty()) { - state = MqttKafkaState.closeInitial(state); + final int packetId = unackedPacketIds.remove(); + if (metadata.partitions.containsKey(packetId)) + { + final List<KafkaTopicPartition> partitions = metadata.partitions.get(packetId); + partitions.remove(0); + if (partitions.isEmpty()) + { + metadata.partitions.remove(packetId); + } + } - doAbort(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + doMqttFlush(traceId, authorization, 0, 0, packetId); + } + else + { + onInitialOffsetCommitAck(traceId, authorization); } } - private void onSignalMessage( - int msgTypeId, - DirectBuffer buffer, - int index, - int length) + private void onInitialOffsetCommitAck( + long traceId, + long authorization) { - switch (msgTypeId) + initializablePartitions.remove(0); + if (initializablePartitions.isEmpty()) { - case BeginFW.TYPE_ID: - final BeginFW begin = beginRO.wrap(buffer, index, index + length); - onKafkaBegin(begin); - break; - case DataFW.TYPE_ID: - final DataFW data = dataRO.wrap(buffer, index, index + length); - onKafkaData(data); - break; - case FlushFW.TYPE_ID: - final FlushFW flush = flushRO.wrap(buffer, index, index + length); - onKafkaFlush(flush); - break; - case EndFW.TYPE_ID: - final EndFW end = endRO.wrap(buffer, index, index + length); - onKafkaEnd(end); - break; + doCreateSessionStream(traceId, authorization); + } + } + + private void doFetchOffsetMetadata( + long traceId, + long authorization, + String16FW topic, + Array32FW<KafkaPartitionFW> partitions) + { + final String topic0 = topic.asString(); + + final KafkaOffsetFetchStream offsetFetch = + new KafkaOffsetFetchStream(originId, resolvedId, this, groupHost, groupPort, topic0, partitions); + offsetFetches.add(offsetFetch); + offsetFetch.doKafkaBegin(traceId, authorization, 0); + } + + private void doCommitOffsetComplete( + long traceId, + long authorization, + String topic, + int partitionId, + int packetId) + { + final long partitionKey = partitionKey(topic, partitionId); + final KafkaOffsetMetadata offsetMetadata = metadata.offsets.get(partitionKey); + offsetMetadata.packetIds.remove((Integer) packetId); + offsetMetadata.sequence++; + Flyweight offsetCommitEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetCommit(o -> o + .topic(topic) + .progress(p -> p + .partitionId(partitionId) + .partitionOffset(offsetMetadata.sequence) + .metadata(offsetMetadataHelper.offsetMetadataToString(offsetMetadata))) + .generationId(generationId) + .leaderEpoch((int) leaderEpochs.get(partitionKey))) + .build(); + +
unackedPacketIds.add(packetId); + offsetCommit.doKafkaData(traceId, authorization, 0, DATA_FLAG_COMPLETE, offsetCommitEx); + } + + private void doFlushProduceAndFetchWithFilter( + long traceId, + long authorization, + long budgetId) + { + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.fetch(f -> + { + f.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); + f.filtersItem(fi -> fi.conditionsItem(ci -> + ci.key(kb -> kb.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())))); + f.filtersItem(fi -> + { + fi.conditionsItem(ci -> + ci.key(kb -> kb.length(clientIdMigrate.length()) + .value(clientIdMigrate.value(), 0, clientIdMigrate.length()))); + fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(SENDER_ID_NAME.length()) + .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(sessionId.length()) + .value(sessionId.value(), 0, sessionId.length()))))); + }); + })) + .build(); + + session.doKafkaFlush(traceId, authorization, budgetId, 0, kafkaFlushEx); + } + + private void doCreateSessionStream( + long traceId, + long authorization) + { + Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> + { + sessionBuilder + .flags(sessionFlags) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiryMillis)) + .subscribeQosMax(MQTT_KAFKA_MAX_QOS) + .capabilities(MQTT_KAFKA_CAPABILITIES) + .clientId(clientId); + + metadata.offsets.values().forEach(o -> + o.packetIds.forEach(p -> sessionBuilder.appendPacketIds(p.shortValue()))); + }).build(); + + doMqttBegin(traceId, authorization, 0, mqttBeginEx); + session = new KafkaSessionStateProxy(originId, resolvedId, this); + session.doKafkaBeginIfNecessary(traceId, authorization, 0); + } + } + + public final class KafkaSignalStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final String16FW sessionsTopic; + private final String16FW messagesTopic; + private final String16FW retainedTopic; + private final Object2ObjectHashMap<String16FW, KafkaFetchWillStream> willFetchers; + private final Int2ObjectHashMap<String16FW> expiryClientIds; + + private IntHashSet partitions; + private int state; + + private long initialId; + private long replyId; + private long replySeq; + private long replyAck; + private int replyMax; + private long reconnectAt; + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + + private KafkaSignalStream( + long originId, + long routedId, + String16FW sessionsTopic, + String16FW messagesTopic, + String16FW retainedTopic) + { + this.originId = originId; + this.routedId = routedId; + this.sessionsTopic = sessionsTopic; + this.messagesTopic = messagesTopic; + this.retainedTopic = retainedTopic; + this.willFetchers = new Object2ObjectHashMap<>(); + this.expiryClientIds = new Int2ObjectHashMap<>(); + this.partitions = new IntHashSet(); + + } + + private void onSignalMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break;
+ case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; case AbortFW.TYPE_ID: final AbortFW abort = abortRO.wrap(buffer, index, index + length); onKafkaAbort(abort); @@ -1107,41 +1524,6 @@ private void onSignalMessage( } } - private void onSignal(SignalFW signal) - { - final int signalId = signal.signalId(); - - switch (signalId) - { - case SIGNAL_EXPIRE_SESSION: - onKafkaSessionExpirySignal(signal); - break; - default: - break; - } - } - - private void onKafkaSessionExpirySignal( - SignalFW signal) - { - String16FW clientId = expiryClientIds.get(signal.contextId()); - - Flyweight expireSessionKafkaDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.produce(mp -> mp - .deferred(0) - .timestamp(System.currentTimeMillis()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(clientId.length()) - .value(clientId.value(), 0, clientId.length())) - .hashKey(b -> b.length(clientId.length()) - .value(clientId.value(), 0, clientId.length())))) - .build(); - - doKafkaData(supplyTraceId.get(), 0, expireSessionKafkaDataEx); - } - private void onKafkaBegin( BeginFW begin) { @@ -1321,11 +1703,12 @@ private void onKafkaFlush( { final long sequence = flush.sequence(); final long acknowledge = flush.acknowledge(); + final long reserved = flush.reserved(); assert acknowledge <= sequence; assert sequence >= replySeq; - replySeq = sequence; + replySeq = sequence + reserved; assert replyAck <= replySeq; @@ -1349,35 +1732,6 @@ private void onKafkaFlush( } } - private void onSignalConnectWillStream( - int signalId) - { - assert signalId == SIGNAL_CONNECT_WILL_STREAM; - - this.reconnectAt = NO_CANCEL_ID; - - reconnectAttempt = 0; - state = 0; - replySeq = 0; - replyAck = 0; - - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); - - if (decodeSlot != NO_SLOT) - { - bufferPool.release(decodeSlot); - decodeSlot = NO_SLOT; - decodeSlotOffset = 0; - } - final long traceId = supplyTraceId.get(); - - willFetchers.values().forEach(f -> f.cleanup(traceId, 0L)); - willFetchers.clear(); - - doKafkaBegin(traceId, 0, 0); - } - private void onKafkaEnd( EndFW end) { @@ -1464,51 +1818,166 @@ private void onKafkaReset( } } - private void doKafkaReset( - long traceId) + private void onSignal( + SignalFW signal) { - if (!MqttKafkaState.replyClosed(state)) - { - state = MqttKafkaState.closeReply(state); + final int signalId = signal.signalId(); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + switch (signalId) + { + case SIGNAL_EXPIRE_SESSION: + onKafkaSessionExpirySignal(signal); + break; + default: + break; } } - private void doKafkaWindow( - long traceId, - long authorization, - long budgetId, - int padding, - int capabilities) + private void onKafkaSessionExpirySignal( + SignalFW signal) { - replyMax = 8192; + String16FW clientId = expiryClientIds.get(signal.contextId()); - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, padding, 0, capabilities); - } + Flyweight expireSessionKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())) + .hashKey(b -> 
b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())))) + .build(); + doKafkaData(supplyTraceId.get(), 0, expireSessionKafkaDataEx); + } - private void doKafkaData( - long traceId, - long authorization, - Flyweight extension) + private void onSignalConnectWillStream( + int signalId) { + assert signalId == SIGNAL_CONNECT_WILL_STREAM; - doData(kafka, originId, routedId, initialId, 0, 0, 0, - traceId, authorization, 0, DATA_FLAG_COMPLETE, 0, null, extension); - } - } + this.reconnectAt = NO_CANCEL_ID; - private final class KafkaFetchWillStream - { - private final KafkaSignalStream delegate; - private final String16FW topic; - private final String16FW clientId; - private final String lifetimeId; - private final String willId; - private final long deliverAt; - private MessageConsumer kafka; + reconnectAttempt = 0; + state = 0; + replySeq = 0; + replyAck = 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + if (decodeSlot != NO_SLOT) + { + bufferPool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + } + final long traceId = supplyTraceId.get(); + + willFetchers.values().forEach(f -> f.cleanup(traceId, 0L)); + willFetchers.clear(); + + doKafkaBegin(traceId, 0, 0); + } + + + private void doKafkaBegin( + long timeMillis) + { + this.reconnectAt = signaler.signalAt( + timeMillis, + SIGNAL_CONNECT_WILL_STREAM, + this::onSignalConnectWillStream); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + assert state == 0; + + state = MqttKafkaState.openingInitial(state); + + kafka = newSignalStream(this::onSignalMessage, originId, routedId, initialId, 0, 0, 0, + traceId, authorization, affinity, sessionsTopic); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + + signaler.cancel(reconnectAt); + reconnectAt = NO_CANCEL_ID; + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + } + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyMax = 8192; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, 0, capabilities); + } + + + private void doKafkaData( + long traceId, + long authorization, + Flyweight extension) + { + + doData(kafka, originId, routedId, initialId, 0, 0, 0, + traceId, authorization, 0, DATA_FLAG_COMPLETE, 0, null, extension); + } + } + + private final class KafkaFetchWillStream + { + private final KafkaSignalStream delegate; + private final String16FW topic; + private final String16FW clientId; + private final String lifetimeId; + private final String willId; + private final long deliverAt; + private MessageConsumer kafka; private final long originId; private final long routedId; private final long initialId; @@ -1555,64 +2024,6 @@ private 
KafkaFetchWillStream( this.deliverAt = deliverAt; } - private void doKafkaBegin( - long traceId, - long authorization, - long affinity, - String16FW lifetimeId) - { - if (!MqttKafkaState.initialOpening(state)) - { - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, clientId, lifetimeId, topic); - } - } - - private void cleanup( - long traceId, - long authorization) - { - doKafkaEnd(traceId, authorization); - if (willProducer != null) - { - willProducer.doKafkaEnd(traceId, authorization); - } - if (willRetainProducer != null) - { - willRetainProducer.doKafkaEnd(traceId, authorization); - } - bufferPool.release(dataSlot); - dataSlot = NO_SLOT; - messageSlotOffset = 0; - } - - private void doKafkaEnd( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - delegate.willFetchers.remove(clientId); - } - } - - private void doKafkaAbort( - long traceId, - long authorization) - { - if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) - { - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } - private void onKafkaMessage( int msgTypeId, DirectBuffer buffer, @@ -1763,11 +2174,12 @@ private void onKafkaFlush( final long acknowledge = flush.acknowledge(); final long traceId = flush.traceId(); final long authorization = flush.authorization(); + final long reserved = flush.reserved(); assert acknowledge <= sequence; assert sequence >= replySeq; - replySeq = sequence; + replySeq = sequence + reserved; assert replyAck <= replySeq; @@ -1790,31 +2202,6 @@ private void onKafkaWindow( assert initialAck <= initialSeq; } - private void doKafkaReset( - long traceId) - { - if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.replyClosed(state)) - { - state = MqttKafkaState.closeReply(state); - - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); - } - } - - private void doKafkaWindow( - long traceId, - long authorization, - long budgetId, - int padding, - int capabilities) - { - replyAck = replySeq; - replyMax = bufferPool.slotCapacity(); - - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, padding, replyPad, capabilities); - } - private void onWillMessageAcked( long traceId, long authorization) @@ -1827,7 +2214,7 @@ private void onWillMessageAcked( // Cleanup will message + will signal String16FW key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) - .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, UTF_8).build(); Flyweight kafkaWillDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -1845,7 +2232,7 @@ private void onWillMessageAcked( String16FW willSignalKey = new String16FW.Builder() .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW 
.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -1869,97 +2256,55 @@ private void onWillMessageAcked( doKafkaEnd(traceId, authorization); } } - } - - private final class KafkaProduceWillStream - { - private MessageConsumer kafka; - private final long originId; - private final long routedId; - private final long initialId; - private final String16FW kafkaTopic; - private final long deliverAt; - private final long replyId; - private final KafkaFetchWillStream delegate; - private final int flags; - private final int qos; - - private int state; - - private long initialSeq; - private long initialAck; - private int initialMax; - - private long replySeq; - private long replyAck; - private int replyMax; - private int replyPad; - - private KafkaProduceWillStream( - long originId, - long routedId, - KafkaFetchWillStream delegate, - String16FW kafkaTopic, - int qos, - long deliverAt, - int flags) - { - this.originId = originId; - this.routedId = routedId; - this.delegate = delegate; - this.initialId = supplyInitialId.applyAsLong(routedId); - this.kafkaTopic = kafkaTopic; - this.qos = qos; - this.deliverAt = deliverAt; - this.replyId = supplyReplyId.applyAsLong(initialId); - this.flags = flags; - } private void doKafkaBegin( long traceId, long authorization, - long affinity) + long affinity, + String16FW lifetimeId) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, kafkaTopic, qos); + if (!MqttKafkaState.initialOpening(state)) + { + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, clientId, lifetimeId, topic); + } } - private void doKafkaData( + private void doKafkaEnd( long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) + long authorization) { - if ((flags & DATA_FLAG_FIN) != 0) + if (!MqttKafkaState.initialClosed(state)) { - willDeliverIds.remove(delegate.clientId); - } - - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - initialSeq += reserved; + state = MqttKafkaState.closeInitial(state); - assert initialSeq <= initialAck + initialMax; + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + delegate.willFetchers.remove(clientId); + } } - private void doKafkaEnd( + private void doKafkaAbort( long traceId, long authorization) { - if (!MqttKafkaState.initialClosed(state)) + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) { state = MqttKafkaState.closeInitial(state); - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaReset( + long traceId) + { + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -1970,10 +2315,76 @@ private void doKafkaWindow( 
int padding, int capabilities) { + replyAck = replySeq; + replyMax = bufferPool.slotCapacity(); + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, padding, replyPad, capabilities); } + private void cleanup( + long traceId, + long authorization) + { + doKafkaEnd(traceId, authorization); + if (willProducer != null) + { + willProducer.doKafkaEnd(traceId, authorization); + } + if (willRetainProducer != null) + { + willRetainProducer.doKafkaEnd(traceId, authorization); + } + bufferPool.release(dataSlot); + dataSlot = NO_SLOT; + messageSlotOffset = 0; + } + } + + private final class KafkaProduceWillStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final String16FW kafkaTopic; + private final long deliverAt; + private final long replyId; + private final KafkaFetchWillStream delegate; + private final int flags; + private final int qos; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaProduceWillStream( + long originId, + long routedId, + KafkaFetchWillStream delegate, + String16FW kafkaTopic, + int qos, + long deliverAt, + int flags) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.kafkaTopic = kafkaTopic; + this.qos = qos; + this.deliverAt = deliverAt; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.flags = flags; + } + private void onKafkaMessage( int msgTypeId, DirectBuffer buffer, @@ -2001,21 +2412,6 @@ private void onKafkaMessage( } } - private void onKafkaSignal( - SignalFW signal) - { - final int signalId = signal.signalId(); - - switch (signalId) - { - case SIGNAL_DELIVER_WILL_MESSAGE: - onWillDeliverSignal(signal); - break; - default: - break; - } - } - private void onKafkaBegin( BeginFW begin) { @@ -2059,6 +2455,28 @@ private void onKafkaData( doKafkaReset(traceId); } + private void onKafkaSignal( + SignalFW signal) + { + final int signalId = signal.signalId(); + + switch (signalId) + { + case SIGNAL_DELIVER_WILL_MESSAGE: + onWillDeliverSignal(signal); + break; + default: + break; + } + } + + + private void onWillDeliverSignal( + SignalFW signal) + { + sendWill(signal.traceId(), signal.authorization(), 0); + } + private void onKafkaWindow( WindowFW window) { @@ -2095,10 +2513,74 @@ private void onKafkaWindow( } } + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, kafkaTopic, qos); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + if ((flags & DATA_FLAG_FIN) != 0) + { + willDeliverIds.remove(delegate.clientId); + } + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } - private void onWillDeliverSignal(SignalFW 
signal) + private void doKafkaEnd( + long traceId, + long authorization) { - sendWill(signal.traceId(), signal.authorization(), 0); + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } } private void sendWill( @@ -2194,17 +2676,6 @@ private void sendWill( delegate.doKafkaWindow(traceId, authorization, 0, 0, 0); } - private void doKafkaReset( - long traceId) - { - if (!MqttKafkaState.replyClosed(state)) - { - state = MqttKafkaState.closeReply(state); - - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); - } - } - private void addHeader( OctetsFW key, OctetsFW value) @@ -2307,16 +2778,31 @@ private static int indexOfByte( return byteAt; } + private static boolean hasRedirectCapability( + int flags) + { + return (flags & REDIRECT_AVAILABLE_MASK) != 0; + } + + + private static long partitionKey( + String topic, + int partitionId) + { + final int topicHashCode = System.identityHashCode(topic.intern()); + return ((long) topicHashCode << 32) | (partitionId & 0xFFFFFFFFL); + } + private static boolean isSetWillFlag( int flags) { - return (flags & MqttSessionFlags.WILL.value() << 1) != 0; + return (flags & 1 << MqttSessionFlags.WILL.value()) != 0; } private static boolean isSetCleanStart( int flags) { - return (flags & MqttSessionFlags.CLEAN_START.value() << 1) != 0; + return (flags & 1 << MqttSessionFlags.CLEAN_START.value()) != 0; } private abstract class KafkaSessionStream @@ -2351,225 +2837,13 @@ private KafkaSessionStream( this.replyId = supplyReplyId.applyAsLong(initialId); } - private void doKafkaBeginIfNecessary( - long traceId, - long authorization, - long affinity) + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) { - if (!MqttKafkaState.initialOpening(state)) - { - doKafkaBegin(traceId, authorization, affinity); - } - } - - protected final void doKafkaData( - long traceId, - long authorization, - long budgetId, - int reserved, - int padding, - int flags, - DirectBuffer buffer, - int offset, - int limit, - Flyweight extension) - { - - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, buffer, offset, limit, extension); - - initialSeq += reserved; - - assert initialSeq - padding <= initialAck + initialMax; - } - - protected final void doKafkaData( - long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) - { - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - initialSeq += reserved; - - assert initialSeq <= initialAck + initialMax; - } - - protected final void cancelExpirySignal( - long authorization, - long traceId) - { - String16FW expirySignalKey = new 
String16FW.Builder() - .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); - Flyweight expirySignalKafkaDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.produce(mp -> mp - .deferred(0) - .timestamp(System.currentTimeMillis()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(expirySignalKey.length()) - .value(expirySignalKey.value(), 0, expirySignalKey.length())) - .hashKey(b -> b.length(delegate.clientId.length()) - .value(delegate.clientId.value(), 0, delegate.clientId.length())) - .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) - .name(TYPE_HEADER_NAME_OCTETS) - .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) - .value(EXPIRY_SIGNAL_NAME_OCTETS)))) - .build(); - - doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, - null, expirySignalKafkaDataEx); - } - - protected final void sendExpirySignal( - long authorization, - long traceId, - Flyweight payload) - { - String16FW expirySignalKey = new String16FW.Builder() - .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); - Flyweight expirySignalKafkaDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.produce(mp -> mp - .deferred(0) - .timestamp(System.currentTimeMillis()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(expirySignalKey.length()) - .value(expirySignalKey.value(), 0, expirySignalKey.length())) - .hashKey(b -> b.length(delegate.clientId.length()) - .value(delegate.clientId.value(), 0, delegate.clientId.length())) - .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) - .name(TYPE_HEADER_NAME_OCTETS) - .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) - .value(EXPIRY_SIGNAL_NAME_OCTETS)))) - .build(); - - doKafkaData(traceId, authorization, 0, payload.sizeof(), delegate.sessionPadding, DATA_FLAG_COMPLETE, - payload, expirySignalKafkaDataEx); - } - - private void sendWillSignal( - long traceId, - long authorization) - { - String16FW willSignalKey = new String16FW.Builder() - .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); - Flyweight willSignalKafkaDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.produce(mp -> mp - .deferred(0) - .timestamp(System.currentTimeMillis()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(willSignalKey.length()) - .value(willSignalKey.value(), 0, willSignalKey.length())) - .hashKey(b -> b.length(delegate.clientId.length()) - .value(delegate.clientId.value(), 0, delegate.clientId.length())) - .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) - .name(TYPE_HEADER_NAME_OCTETS) - .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) - .value(WILL_SIGNAL_NAME_OCTETS)))) - .build(); - - final MqttSessionSignalFW willSignal = - mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) - .will(w -> w - .instanceId(instanceId.instanceId()) - .clientId(delegate.clientId) - .delay(delegate.delay) - .deliverAt(supplyTime.getAsLong() + delegate.delay) - .lifetimeId(delegate.lifetimeId) - .willId(delegate.willId)) - .build(); - - doKafkaData(traceId, authorization, 0, 
willSignal.sizeof(), delegate.sessionPadding, DATA_FLAG_COMPLETE, - willSignal, willSignalKafkaDataEx); - } - - protected void doKafkaData( - long traceId, - long authorization, - long budgetId, - int reserved, - int padding, - int flags, - Flyweight payload, - Flyweight extension) - { - final DirectBuffer buffer = payload.buffer(); - final int offset = payload.offset(); - final int limit = payload.limit(); - final int length = limit - offset; - - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, buffer, offset, length, extension); - - initialSeq += reserved; - - assert initialSeq - padding <= initialAck + initialMax; - } - - private void doKafkaFlush( - long traceId, - long authorization, - long budgetId, - int reserved, - Flyweight extension) - { - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, extension); - } - - private void doKafkaEnd( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } - - private void doKafkaAbort( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } - - private void onKafkaMessage( - int msgTypeId, - DirectBuffer buffer, - int index, - int length) - { - switch (msgTypeId) + switch (msgTypeId) { case BeginFW.TYPE_ID: final BeginFW begin = beginRO.wrap(buffer, index, index + length); @@ -2630,7 +2904,7 @@ private void onKafkaBegin( .session(sessionBuilder -> sessionBuilder .flags(delegate.sessionFlags) .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) - .qosMax(MQTT_KAFKA_MAX_QOS) + .subscribeQosMax(MQTT_KAFKA_MAX_QOS) .capabilities(MQTT_KAFKA_CAPABILITIES) .clientId(delegate.clientId)) .build(); @@ -2663,34 +2937,16 @@ private void onKafkaData( } else { - handleKafkaData(data); + onKafkaDataImpl(data); } } - protected abstract void doKafkaBegin( - long traceId, - long authorization, - long affinity); - - protected abstract void handleKafkaData( + protected abstract void onKafkaDataImpl( DataFW data); - protected void onKafkaWindow( - WindowFW window) + protected void onKafkaFlush( + FlushFW flush) { - final long sequence = window.sequence(); - final long acknowledge = window.acknowledge(); - final int maximum = window.maximum(); - - assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - assert maximum >= delegate.initialMax; - - initialAck = acknowledge; - initialMax = maximum; - state = MqttKafkaState.openInitial(state); - - assert initialAck <= initialSeq; } protected void onKafkaEnd( @@ -2698,11 +2954,6 @@ protected void onKafkaEnd( { } - protected void onKafkaFlush( - FlushFW flush) - { - } - private void onKafkaAbort( AbortFW abort) { @@ -2722,31 +2973,6 @@ private void onKafkaAbort( delegate.doMqttAbort(traceId, authorization); } - protected void sendMigrateSignal( - long traceId, - long authorization) - { - Flyweight kafkaMigrateDataEx = 
kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.produce(mp -> mp - .deferred(0) - .timestamp(System.currentTimeMillis()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(delegate.clientIdMigrate.length()) - .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) - .hashKey(b -> b.length(delegate.clientId.length()) - .value(delegate.clientId.value(), 0, delegate.clientId.length())) - .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) - .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) - .valueLen(delegate.sessionId.length()) - .value(delegate.sessionId.value(), 0, delegate.sessionId.length())))) - .build(); - - doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, - EMPTY_OCTETS, kafkaMigrateDataEx); - } - protected void onKafkaReset( ResetFW reset) { @@ -2777,89 +3003,12 @@ protected void onKafkaReset( delegate.doMqttReset(traceId, mqttResetEx); } - private void doKafkaReset( - long traceId) - { - if (!MqttKafkaState.replyClosed(state)) - { - state = MqttKafkaState.closeReply(state); - - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); - } - } - - private void doKafkaWindow( - long traceId, - long authorization, - long budgetId, - int capabilities) - { - replyAck = delegate.replyAck; - replyMax = delegate.replyMax; - replyPad = delegate.replyPad; - - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, replyPad, 0, capabilities); - } - } - - private final class KafkaSessionSignalStream extends KafkaSessionStream - { - private KafkaSessionSignalStream( - long originId, - long routedId, - MqttSessionProxy delegate) - { - super(originId, routedId, delegate); - } - - @Override - protected void doKafkaBegin(long traceId, long authorization, long affinity) - { - assert state == 0; - - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); - - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.sessionsTopic, null, delegate.clientIdMigrate, - delegate.sessionId, serverRef, KafkaCapabilities.PRODUCE_AND_FETCH); - } - - @Override - protected void handleKafkaData(DataFW data) - { - final long traceId = data.traceId(); - final long authorization = data.authorization(); - final long budgetId = data.budgetId(); - final int reserved = data.reserved(); - - final OctetsFW extension = data.extension(); - final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); - final KafkaDataExFW kafkaDataEx = - dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; - final KafkaMergedDataExFW kafkaMergedDataEx = - kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; - final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.fetch().key() : null; - - if (delegate.group != null && key != null) - { - delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); - } - } - - @Override protected void onKafkaWindow( WindowFW window) { final long sequence = window.sequence(); final long acknowledge = window.acknowledge(); final int maximum = window.maximum(); - final long authorization = window.authorization(); - final long traceId = window.traceId(); - final boolean wasOpen = MqttKafkaState.initialOpened(state); assert acknowledge <= sequence; assert acknowledge >= delegate.initialAck; @@ -2870,145 +3019,242 @@ protected void onKafkaWindow( state = MqttKafkaState.openInitial(state); assert initialAck <= initialSeq; + } - if (!wasOpen) + private void doKafkaBeginIfNecessary( + long traceId, + long authorization, + long affinity) + { + if (!MqttKafkaState.initialOpening(state)) { - final long routedId = delegate.session.routedId; - - delegate.group = new KafkaGroupStream(originId, routedId, delegate); - delegate.group.doKafkaBegin(traceId, authorization, 0); - - sendMigrateSignal(traceId, authorization); + doKafkaBegin(traceId, authorization, affinity); } } - } - private final class KafkaSessionStateProxy extends KafkaSessionStream - { - private KafkaSessionStateProxy( - long originId, - long routedId, - MqttSessionProxy delegate) - { - super(originId, routedId, delegate); - } + protected abstract void doKafkaBegin( + long traceId, + long authorization, + long affinity); - @Override - protected void doKafkaBegin( + protected final void doKafkaData( long traceId, long authorization, - long affinity) + long budgetId, + int reserved, + int padding, + int flags, + DirectBuffer buffer, + int offset, + int limit, + Flyweight extension) { - assert state == 0; - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, limit, extension); - state = MqttKafkaState.openingInitial(state); + initialSeq += reserved; - KafkaCapabilities capabilities = isSetWillFlag(delegate.sessionFlags) ? - KafkaCapabilities.PRODUCE_ONLY : KafkaCapabilities.PRODUCE_AND_FETCH; - kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.clientIdMigrate, - delegate.sessionId, serverRef, capabilities); + assert initialSeq - padding <= initialAck + initialMax; } - @Override - protected void handleKafkaData( - DataFW data) + protected final void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) { - final long traceId = data.traceId(); - final long authorization = data.authorization(); - final long budgetId = data.budgetId(); - final int reserved = data.reserved(); + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); - final int flags = data.flags(); - final OctetsFW payload = data.payload(); - final OctetsFW extension = data.extension(); - final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); - final KafkaDataExFW kafkaDataEx = - dataEx != null && dataEx.typeId() == kafkaTypeId ? 
extension.get(kafkaDataExRO::tryWrap) : null; - final KafkaMergedDataExFW kafkaMergedDataEx = - kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; - final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().key() : null; + initialSeq += reserved; - if (key != null && payload != null) - { - int keyLen = key.length(); - if (keyLen == delegate.clientId.length()) - { - MqttSessionStateFW sessionState = null; - if (payload.sizeof() > 0) - { - sessionState = mqttSessionStateRO.wrap(payload.buffer(), payload.offset(), payload.limit()); - } - delegate.doMqttData(traceId, authorization, budgetId, reserved, flags, sessionState); - } - else if (keyLen == delegate.clientIdMigrate.length()) - { - delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); - } - } + assert initialSeq <= initialAck + initialMax; } - @Override - protected void onKafkaWindow( - WindowFW window) + protected void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int padding, + int flags, + Flyweight payload, + Flyweight extension) { - final long sequence = window.sequence(); - final long acknowledge = window.acknowledge(); - final int maximum = window.maximum(); - final long authorization = window.authorization(); - final long traceId = window.traceId(); - final long budgetId = window.budgetId(); - final int padding = window.padding(); - final int capabilities = window.capabilities(); - final boolean wasOpen = MqttKafkaState.initialOpened(state); + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + final int length = limit - offset; - assert acknowledge <= sequence; - assert acknowledge >= initialAck; - assert maximum >= initialMax; + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, length, extension); - initialAck = acknowledge; - initialMax = maximum; - state = MqttKafkaState.openInitial(state); + initialSeq += reserved; - assert initialAck <= initialSeq; + assert initialSeq - padding <= initialAck + initialMax; + } - if (!wasOpen) + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, extension); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) { - if (!isSetCleanStart(delegate.sessionFlags)) - { - cancelWillSignal(authorization, traceId); - } - cancelExpirySignal(authorization, traceId); // expiry cancellation + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); - final MqttSessionSignalFW expirySignal = - mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) - .expiry(w -> w - .instanceId(instanceId.instanceId()) - .clientId(delegate.clientId) - .delay(delegate.sessionExpiryMillis) - .expireAt(MqttTime.UNKNOWN.value())) - .build(); - delegate.sessionPadding += expirySignal.sizeof(); - sendExpirySignal(authorization, traceId, expirySignal); // expire later + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); } - - int budget = initialMax - (int)(initialSeq - initialAck); - long 
tempSessionPadding = Math.min(budget, delegate.sessionPadding); - delegate.sessionPadding -= tempSessionPadding; - long mqttAck = budget - tempSessionPadding; - delegate.doMqttWindow(authorization, traceId, budgetId, mqttAck, capabilities); } - private void cancelWillSignal( - long authorization, - long traceId) + private void doKafkaAbort( + long traceId, + long authorization) { - String16FW willSignalKey = new String16FW.Builder() - .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) - .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, replyPad, 0, capabilities); + } + + protected void sendMigrateSignal( + long traceId, + long authorization) + { + Flyweight kafkaMigrateDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(delegate.clientIdMigrate.length()) + .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) + .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(delegate.sessionId.length()) + .value(delegate.sessionId.value(), 0, delegate.sessionId.length())))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + EMPTY_OCTETS, kafkaMigrateDataEx); + } + + protected final void cancelExpirySignal( + long authorization, + long traceId) + { + String16FW expirySignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, UTF_8).build(); + Flyweight expirySignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(expirySignalKey.length()) + .value(expirySignalKey.value(), 0, expirySignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) + .value(EXPIRY_SIGNAL_NAME_OCTETS)))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, 
expirySignalKafkaDataEx); + } + + protected final void sendExpirySignal( + long authorization, + long traceId, + Flyweight payload) + { + String16FW expirySignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, UTF_8).build(); + Flyweight expirySignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(expirySignalKey.length()) + .value(expirySignalKey.value(), 0, expirySignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) + .value(EXPIRY_SIGNAL_NAME_OCTETS)))) + .build(); + + doKafkaData(traceId, authorization, 0, payload.sizeof(), delegate.sessionPadding, DATA_FLAG_COMPLETE, + payload, expirySignalKafkaDataEx); + } + + private void sendWillSignal( + long traceId, + long authorization) + { + String16FW willSignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) @@ -3027,55 +3273,109 @@ private void cancelWillSignal( .value(WILL_SIGNAL_NAME_OCTETS)))) .build(); - doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, - null, willSignalKafkaDataEx); + final MqttSessionSignalFW willSignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .will(w -> w + .instanceId(instanceId.instanceId()) + .clientId(delegate.clientId) + .delay(delegate.delay) + .deliverAt(supplyTime.getAsLong() + delegate.delay) + .lifetimeId(delegate.lifetimeId) + .willId(delegate.willId)) + .build(); + + doKafkaData(traceId, authorization, 0, willSignal.sizeof(), delegate.sessionPadding, DATA_FLAG_COMPLETE, + willSignal, willSignalKafkaDataEx); + } + } + + private final class KafkaSessionSignalStream extends KafkaSessionStream + { + private KafkaSessionSignalStream( + long originId, + long routedId, + MqttSessionProxy delegate) + { + super(originId, routedId, delegate); } @Override - protected void onKafkaFlush( - FlushFW flush) + protected void onKafkaWindow( + WindowFW window) { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); - final long budgetId = flush.budgetId(); - final int reserved = flush.reserved(); + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); assert acknowledge <= sequence; - assert sequence >= replySeq; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; - replySeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); - assert replyAck <= replySeq; + assert initialAck <= initialSeq; - delegate.doMqttData(traceId, 
authorization, budgetId, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS); + if (!wasOpen) + { + final long routedId = delegate.session.routedId; + + delegate.group = new KafkaGroupStream(originId, routedId, delegate); + delegate.group.doKafkaBegin(traceId, authorization, 0); + + sendMigrateSignal(traceId, authorization); + } } @Override - protected void onKafkaEnd( - EndFW end) + protected void doKafkaBegin( + long traceId, + long authorization, + long affinity) { - final long sequence = end.sequence(); - final long acknowledge = end.acknowledge(); - final long traceId = end.traceId(); - final long authorization = end.authorization(); + assert state == 0; - assert acknowledge <= sequence; - assert sequence >= replySeq; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); - replySeq = sequence; - state = MqttKafkaState.closeReply(state); + state = MqttKafkaState.openingInitial(state); - assert replyAck <= replySeq; + final String server = delegate.redirect ? serverRef : null; + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, null, delegate.clientIdMigrate, + delegate.sessionId, server, KafkaCapabilities.PRODUCE_AND_FETCH); + } - delegate.doMqttEnd(traceId, authorization); + @Override + protected void onKafkaDataImpl(DataFW data) + { + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().key() : null; + + if (delegate.group != null && key != null) + { + delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); + } } } - private final class KafkaFetchWillSignalStream extends KafkaSessionStream + private final class KafkaSessionStateProxy extends KafkaSessionStream { - private KafkaFetchWillSignalStream( + private KafkaSessionStateProxy( long originId, long routedId, MqttSessionProxy delegate) @@ -3084,98 +3384,1215 @@ private KafkaFetchWillSignalStream( } @Override - protected void doKafkaBegin( - long traceId, - long authorization, - long affinity) + protected void onKafkaDataImpl( + DataFW data) { - if (!MqttKafkaState.initialOpening(state)) + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.fetch().key() : null; + + if (key != null && payload != null) { - state = MqttKafkaState.openingInitial(state); + int keyLen = key.length(); + if (keyLen == delegate.clientId.length()) + { + MqttSessionStateFW sessionState = null; + if (payload.sizeof() > 0) + { + sessionState = mqttSessionStateRO.wrap(payload.buffer(), payload.offset(), payload.limit()); + } + delegate.doMqttData(traceId, authorization, budgetId, reserved, flags, sessionState); + } + else if (keyLen == delegate.clientIdMigrate.length()) + { + delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); + } + } + } + + @Override + protected void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long budgetId = flush.budgetId(); + final int reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + delegate.doMqttData(traceId, authorization, budgetId, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS); + } + + @Override + protected void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + + @Override + protected void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); + + assert acknowledge <= sequence; + assert acknowledge >= initialAck; + assert maximum >= initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + if (!wasOpen) + { + if (!isSetCleanStart(delegate.sessionFlags)) + { + cancelWillSignal(authorization, traceId); + } + cancelExpirySignal(authorization, traceId); // expiry cancellation + + final MqttSessionSignalFW expirySignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .expiry(w -> w + .instanceId(instanceId.instanceId()) + .clientId(delegate.clientId) + .delay(delegate.sessionExpiryMillis) + .expireAt(MqttTime.UNKNOWN.value())) + .build(); + delegate.sessionPadding += expirySignal.sizeof(); + sendExpirySignal(authorization, traceId, expirySignal); // expire later + } + + int budget = initialMax - (int)(initialSeq - initialAck); + long tempSessionPadding = Math.min(budget, delegate.sessionPadding); + delegate.sessionPadding -= tempSessionPadding; + long mqttAck = budget - tempSessionPadding; + delegate.doMqttWindow(authorization, traceId, budgetId, mqttAck, capabilities); + } + + @Override + protected void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + 
state = MqttKafkaState.openingInitial(state); + + KafkaCapabilities capabilities = isSetWillFlag(delegate.sessionFlags) ? + KafkaCapabilities.PRODUCE_ONLY : KafkaCapabilities.PRODUCE_AND_FETCH; + final String server = delegate.redirect ? serverRef : null; + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.clientIdMigrate, + delegate.sessionId, server, capabilities); + } + + private void cancelWillSignal( + long authorization, + long traceId) + { + String16FW willSignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.produce(mp -> mp + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS)))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, willSignalKafkaDataEx); + } + } + + private final class KafkaFetchWillSignalStream extends KafkaSessionStream + { + private KafkaFetchWillSignalStream( + long originId, + long routedId, + MqttSessionProxy delegate) + { + super(originId, routedId, delegate); + } + + @Override + protected void onKafkaDataImpl( + DataFW data) + { + final OctetsFW extension = data.extension(); + final OctetsFW payload = data.payload(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.fetch().key() : null; + + if (key != null && payload != null) + { + MqttSessionSignalFW sessionSignal = + mqttSessionSignalRO.wrap(payload.buffer(), payload.offset(), payload.limit()); + if (sessionSignal != null) + { + delegate.lifetimeId = sessionSignal.will().lifetimeId().asString(); + } + } + } + + @Override + protected void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + delegate.session.doKafkaEnd(traceId, authorization); + final long routedId = delegate.session.routedId; + + delegate.session = new KafkaSessionSignalStream(originId, routedId, delegate); + delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); + } + + @Override + protected void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + if (!MqttKafkaState.initialOpening(state)) + { + state = MqttKafkaState.openingInitial(state); + + final String server = delegate.redirect ? serverRef : null; + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, server); + } + } + } + + private final class KafkaGroupStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final MqttSessionProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaGroupStream( + long originId, + long routedId, + MqttSessionProxy delegate) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void onGroupMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + 
replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + final OctetsFW extension = begin.extension(); + + int sessionExpiryMillisInRange; + if (extension.sizeof() > 0) + { + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + + sessionExpiryMillisInRange = kafkaGroupBeginEx.timeout(); + delegate.onGroupJoined(kafkaGroupBeginEx.instanceId().asString(), kafkaGroupBeginEx.host().asString(), + kafkaGroupBeginEx.port(), sessionExpiryMillisInRange); + } + + delegate.onSessionBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + } + + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + final OctetsFW extension = flush.extension(); + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; + final KafkaGroupFlushExFW kafkaGroupFlushEx = + kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaFlushExFW.KIND_GROUP ? kafkaFlushEx.group() : null; + final String16FW leaderId = kafkaGroupFlushEx != null ? kafkaGroupFlushEx.leaderId() : null; + final String16FW memberId = kafkaGroupFlushEx != null ? kafkaGroupFlushEx.memberId() : null; + final int members = kafkaGroupFlushEx != null ? kafkaGroupFlushEx.members().fieldCount() : 0; + final int generationId = kafkaGroupFlushEx != null ? 
kafkaGroupFlushEx.generationId() : 0; + + if (leaderId.equals(memberId)) + { + delegate.onSessionBecomesLeader(traceId, authorization, members, memberId.asString(), generationId); + } + + if (!MqttKafkaState.initialClosed(state)) + { + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS, EMPTY_OCTETS); + } + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); + + assert acknowledge <= sequence; + + + final KafkaResetExFW kafkaResetEx = extension.get(kafkaResetExRO::tryWrap); + final int error = kafkaResetEx != null ? kafkaResetEx.error() : -1; + + Flyweight mqttResetEx = EMPTY_OCTETS; + if (error != -1) + { + mqttResetEx = + mqttSessionResetExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .reasonCode(MQTT_REASON_CODES.get(error)) + .reason(MQTT_REASONS.getOrDefault(error, DEFAULT_REASON)) + .build(); + } + delegate.doMqttReset(traceId, mqttResetEx); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newGroupStream(this::onGroupMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.clientId, delegate.sessionExpiryMillis); + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved) + { + initialSeq = delegate.initialSeq; + + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, EMPTY_OCTETS); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void 
doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + } + + private final class KafkaMetaStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final MqttSessionProxy delegate; + private final String16FW topic; + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaMetaStream( + long originId, + long routedId, + MqttSessionProxy delegate, + String16FW topic, + boolean retained) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.topic = topic; + } + + private void onMetaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + 
assert replyAck <= replySeq; + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + + final OctetsFW extension = data.extension(); + final KafkaDataExFW kafkaDataEx = extension.get(kafkaDataExRO::tryWrap); + final KafkaMetaDataExFW kafkaMetaDataEx = kafkaDataEx.meta(); + final Array32FW partitions = kafkaMetaDataEx.partitions(); + + delegate.onPartitionsFetched(traceId, authorization, topic, partitions, this); + doKafkaEnd(traceId, authorization); + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + + delegate.doMqttReset(traceId, EMPTY_OCTETS); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newMetaStream(this::onMetaMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity, topic); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + + private final class KafkaOffsetFetchStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; 
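+        // replyId is derived from initialId in the constructor, pairing the kafka-bound and kafka-reply directions of the stream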
+ private final long replyId; + private final MqttSessionProxy delegate; + private final String host; + private final int port; + private final String topic; + private final Array32FW partitions; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaOffsetFetchStream( + long originId, + long routedId, + MqttSessionProxy delegate, + String host, + int port, + String topic, + Array32FW partitions) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.host = host; + this.port = port; + this.topic = topic; + this.partitions = partitions; + } + + private void onOffsetFetchMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + + final OctetsFW extension = data.extension(); + final KafkaDataExFW kafkaDataEx = extension.get(kafkaDataExRO::tryWrap); + final KafkaOffsetFetchDataExFW kafkaOffsetFetchDataEx = kafkaDataEx.offsetFetch(); + final Array32FW partitions = kafkaOffsetFetchDataEx.partitions(); + + delegate.onOffsetFetched(traceId, authorization, topic, partitions, this); + doKafkaEnd(traceId, authorization); + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = 
abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + + delegate.doMqttReset(traceId, EMPTY_OCTETS); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newOffsetFetchStream(this::onOffsetFetchMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity, delegate.clientId, host, port, topic, partitions); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + + private final class KafkaInitProducerStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final MqttSessionProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaInitProducerStream( + long originId, + long routedId, + MqttSessionProxy delegate) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void onInitProducerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; 
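+                // no DataFW case: the producer id and epoch arrive in the KafkaInitProducerIdBeginEx extension, handled in onKafkaBegin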
+ case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + final OctetsFW extension = begin.extension(); + + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_INIT_PRODUCER_ID; + final KafkaInitProducerIdBeginExFW kafkaInitProducerIdBeginEx = kafkaBeginEx.initProducerId(); - kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, serverRef); - } - } + long producerId = kafkaInitProducerIdBeginEx.producerId(); + short producerEpoch = kafkaInitProducerIdBeginEx.producerEpoch(); - @Override - protected void handleKafkaData( - DataFW data) - { - final OctetsFW extension = data.extension(); - final OctetsFW payload = data.payload(); - final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); - final KafkaDataExFW kafkaDataEx = - dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; - final KafkaMergedDataExFW kafkaMergedDataEx = - kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; - final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.fetch().key() : null; + delegate.onProducerInit(traceId, authorization, producerId, producerEpoch); - if (key != null && payload != null) - { - MqttSessionSignalFW sessionSignal = - mqttSessionSignalRO.wrap(payload.buffer(), payload.offset(), payload.limit()); - if (sessionSignal != null) - { - delegate.lifetimeId = sessionSignal.will().lifetimeId().asString(); - } - } + doKafkaWindow(traceId, authorization, 0, 0, 0); + doKafkaEnd(traceId, authorization); } - @Override - protected void onKafkaFlush( - FlushFW flush) + private void onKafkaEnd( + EndFW end) { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); assert acknowledge <= sequence; assert sequence >= replySeq; replySeq = sequence; + state = MqttKafkaState.closeReply(state); assert replyAck <= replySeq; - - delegate.session.doKafkaEnd(traceId, authorization); - final long routedId = delegate.session.routedId; - - delegate.session = new KafkaSessionSignalStream(originId, routedId, delegate); - delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); } - } - private final class KafkaGroupStream - { - private MessageConsumer kafka; - private final long originId; - private final long routedId; - private final long initialId; - private final long replyId; - private final MqttSessionProxy delegate; + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); - private int state; + assert acknowledge <= sequence; + assert sequence >= replySeq; - private long initialSeq; - private long initialAck; - private int initialMax; + replySeq = sequence; + state = MqttKafkaState.closeReply(state); - private long replySeq; - private long replyAck; - private int replyMax; - private int replyPad; + assert replyAck <= replySeq; - private KafkaGroupStream( - long originId, - long routedId, - MqttSessionProxy delegate) + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) { - this.originId = originId; - this.routedId = routedId; - this.delegate = delegate; - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + + delegate.doMqttReset(traceId, EMPTY_OCTETS); } private void doKafkaBegin( @@ -3188,20 +4605,8 @@ private void doKafkaBegin( initialMax = delegate.initialMax; state = MqttKafkaState.openingInitial(state); - kafka = newGroupStream(this::onGroupMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.clientId, delegate.sessionExpiryMillis); - } - - private void doKafkaFlush( - long traceId, - long authorization, - long budgetId, - int reserved) - { - initialSeq = delegate.initialSeq; - - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, EMPTY_OCTETS); + kafka = newInitProducerStream(this::onInitProducerMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity); } private void 
doKafkaEnd( @@ -3234,7 +4639,72 @@ private void doKafkaAbort( } } - private void onGroupMessage( + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + + private final class KafkaOffsetCommitStream + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final String groupHost; + private final int groupPort; + private final MqttSessionProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaOffsetCommitStream( + long originId, + long routedId, + MqttSessionProxy delegate, + String groupHost, + int groupPort) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.groupHost = groupHost; + this.groupPort = groupPort; + } + + private void onOffsetCommitMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -3246,14 +4716,6 @@ private void onGroupMessage( final BeginFW begin = beginRO.wrap(buffer, index, index + length); onKafkaBegin(begin); break; - case DataFW.TYPE_ID: - final DataFW data = dataRO.wrap(buffer, index, index + length); - onKafkaData(data); - break; - case FlushFW.TYPE_ID: - final FlushFW flush = flushRO.wrap(buffer, index, index + length); - onKafkaFlush(flush); - break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); onKafkaEnd(end); @@ -3266,55 +4728,10 @@ private void onGroupMessage( final ResetFW reset = resetRO.wrap(buffer, index, index + length); onKafkaReset(reset); break; - } - } - - private void onKafkaFlush( - FlushFW flush) - { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); - - assert acknowledge <= sequence; - assert sequence >= replySeq; - - replySeq = sequence; - - assert replyAck <= replySeq; - - final OctetsFW extension = flush.extension(); - final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); - final KafkaFlushExFW kafkaFlushEx = - flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; - final KafkaGroupFlushExFW kafkaGroupDataEx = - kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaFlushExFW.KIND_GROUP ? kafkaFlushEx.group() : null; - final String16FW leaderId = kafkaGroupDataEx != null ? kafkaGroupDataEx.leaderId() : null; - final String16FW memberId = kafkaGroupDataEx != null ? kafkaGroupDataEx.memberId() : null; - final int members = kafkaGroupDataEx != null ? 
kafkaGroupDataEx.members().fieldCount() : 0; - - if (leaderId.equals(memberId)) - { - if (members > 1) - { - delegate.session.sendMigrateSignal(traceId, authorization); - delegate.session.sendWillSignal(traceId, authorization); - delegate.session.doKafkaEnd(traceId, authorization); - doKafkaEnd(traceId, authorization); - } - else - { - delegate.session.doKafkaEnd(traceId, authorization); - final long routedId = delegate.session.routedId; - delegate.session = new KafkaSessionStateProxy(originId, routedId, delegate); - delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); - } - } - - if (!MqttKafkaState.initialClosed(state)) - { - doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS, EMPTY_OCTETS); + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; } } @@ -3336,61 +4753,10 @@ private void onKafkaBegin( replyAck = acknowledge; replyMax = maximum; state = MqttKafkaState.openingReply(state); - - assert replyAck <= replySeq; - - final OctetsFW extension = begin.extension(); - - int sessionExpiryMillisInRange = delegate.sessionExpiryMillis; - if (extension.sizeof() > 0) - { - final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); - - assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; - final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); - - sessionExpiryMillisInRange = kafkaGroupBeginEx.timeout(); - } - - if (delegate.sessionExpiryMillis != sessionExpiryMillisInRange) - { - delegate.sessionExpiryMillis = sessionExpiryMillisInRange; - } - - Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .session(sessionBuilder -> sessionBuilder - .flags(delegate.sessionFlags) - .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) - .qosMax(MQTT_KAFKA_MAX_QOS) - .capabilities(MQTT_KAFKA_CAPABILITIES) - .clientId(delegate.clientId)) - .build(); - - delegate.doMqttBegin(traceId, authorization, affinity, mqttBeginEx); - doKafkaWindow(traceId, authorization, 0, 0, 0); - } - - private void onKafkaData( - DataFW data) - { - final long sequence = data.sequence(); - final long acknowledge = data.acknowledge(); - final long traceId = data.traceId(); - final long authorization = data.authorization(); - final int reserved = data.reserved(); - - assert acknowledge <= sequence; - assert sequence >= replySeq; - - replySeq = sequence + reserved; - - assert replyAck <= replySeq; - if (replySeq > replyAck + replyMax) - { - doKafkaReset(traceId); - delegate.doMqttAbort(traceId, authorization); - } + + assert replyAck <= replySeq; + + doKafkaWindow(traceId, authorization, 0, 0, 0); } private void onKafkaEnd( @@ -3398,8 +4764,6 @@ private void onKafkaEnd( { final long sequence = end.sequence(); final long acknowledge = end.acknowledge(); - final long traceId = end.traceId(); - final long authorization = end.authorization(); assert acknowledge <= sequence; assert sequence >= replySeq; @@ -3408,8 +4772,6 @@ private void onKafkaEnd( state = MqttKafkaState.closeReply(state); assert replyAck <= replySeq; - - delegate.doMqttEnd(traceId, authorization); } private void onKafkaAbort( @@ -3437,25 +4799,54 @@ private void onKafkaReset( final long sequence = reset.sequence(); final long acknowledge = reset.acknowledge(); final long traceId = reset.traceId(); - final OctetsFW extension = reset.extension(); assert acknowledge <= sequence; + delegate.doMqttReset(traceId, EMPTY_OCTETS); + } - 
final KafkaResetExFW kafkaResetEx = extension.get(kafkaResetExRO::tryWrap); - final int error = kafkaResetEx != null ? kafkaResetEx.error() : -1; + private void onKafkaWindow( + WindowFW window) + { + final long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); - Flyweight mqttResetEx = EMPTY_OCTETS; - if (error != -1) + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + if (!wasOpen) { - mqttResetEx = - mqttSessionResetExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .reasonCode(MQTT_REASON_CODES.get(error)) - .reason(MQTT_REASONS.get(error)) - .build(); + delegate.onOffsetCommitOpened(traceId, authorization, budgetId); } - delegate.doMqttReset(traceId, mqttResetEx); + else + { + delegate.onOffsetCommitAck(traceId, authorization); + } + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int flags, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, 0, EMPTY_OCTETS, extension); + + assert initialSeq <= initialAck + initialMax; } private void doKafkaReset( @@ -3484,25 +4875,51 @@ private void doKafkaWindow( traceId, authorization, budgetId, padding, replyPad, capabilities); } - private void doKafkaData( + private void doKafkaBegin( long traceId, long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) + long affinity) { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); + kafka = newOffsetCommitStream(this::onOffsetCommitMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity, delegate.clientId, delegate.memberId, delegate.groupInstanceId, + delegate.groupHost, delegate.groupPort); + } - initialSeq += reserved; + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); - assert initialSeq <= initialAck + initialMax; + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } } - } + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + } private void doBegin( MessageConsumer receiver, @@ -4018,6 +5435,190 @@ private MessageConsumer newGroupStream( return receiver; } + private MessageConsumer 
newMetaStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW topic) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .meta(m -> m + .topic(topic)) + .build(); + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private MessageConsumer newOffsetFetchStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW clientId, + String host, + int port, + String topic, + Array32FW partitions) + { + final String groupId = String.format("%s-%s", clientId.asString(), GROUPID_SESSION_SUFFIX); + + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetFetch(o -> o + .groupId(groupId) + .host(host) + .port(port) + .topic(topic) + .partitions(ps -> partitions.forEach(p -> ps.item(tp -> tp.partitionId(p.partitionId()))))) + .build(); + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private MessageConsumer newInitProducerStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .initProducerId(p -> p + .producerId(0) + .producerEpoch((short) 0)) + .build(); + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private 
MessageConsumer newOffsetCommitStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW clientId, + String memberId, + String instanceId, + String host, + int port) + { + final String groupId = String.format("%s-%s", clientId.asString(), GROUPID_SESSION_SUFFIX); + + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetCommit(o -> o + .groupId(groupId) + .memberId(memberId) + .instanceId(instanceId) + .host(host) + .port(port)) + .build(); + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + private void doWindow( MessageConsumer sender, long originId, diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index 7d067e11ab..fde1562c52 100644 --- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -62,6 +62,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaSkip; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttQoS; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSubscribeOffsetMetadataFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttTopicFilterFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; @@ -83,7 +84,6 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttFlushExFW; -import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttOffsetMetadataFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttOffsetStateFlags; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSubscribeBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSubscribeFlushExFW; @@ -131,7 +131,7 @@ public class MqttKafkaSubscribeFactory implements MqttKafkaStreamFactory private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final MqttSubscribeMessageFW.Builder mqttSubscribeMessageRW = new 
MqttSubscribeMessageFW.Builder(); - private final MqttOffsetMetadataFW.Builder mqttOffsetMetadataRW = new MqttOffsetMetadataFW.Builder(); + private final MqttSubscribeOffsetMetadataFW.Builder mqttOffsetMetadataRW = new MqttSubscribeOffsetMetadataFW.Builder(); private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); @@ -141,7 +141,7 @@ public class MqttKafkaSubscribeFactory implements MqttKafkaStreamFactory private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); private final KafkaHeaderFW kafkaHeaderRO = new KafkaHeaderFW(); private final MqttSubscribeMessageFW mqttSubscribeMessageRO = new MqttSubscribeMessageFW(); - private final MqttOffsetMetadataFW mqttOffsetMetadataRO = new MqttOffsetMetadataFW(); + private final MqttSubscribeOffsetMetadataFW mqttOffsetMetadataRO = new MqttSubscribeOffsetMetadataFW(); private final MqttDataExFW.Builder mqttDataExRW = new MqttDataExFW.Builder(); private final MqttFlushExFW.Builder mqttFlushExRW = new MqttFlushExFW.Builder(); @@ -549,36 +549,6 @@ else if (qos == MqttQoS.EXACTLY_ONCE.value() && state != MqttOffsetStateFlags.IN } } - private void commitDeferredOffsets( - long traceId, - long authorization, - long budgetId, - int reserved, - OffsetHighWaterMark highWaterMark) - { - long offset = highWaterMark.offset; - DeferredOffsetCommit deferredOffsetCommit = highWaterMark.deferredOffsetCommits.get(offset); - - while (deferredOffsetCommit != null) - { - deferredOffsetCommit.commit(traceId, authorization, budgetId, reserved); - highWaterMark.deferredOffsetCommits.remove(offset); - offset = highWaterMark.increase(); - deferredOffsetCommit = highWaterMark.deferredOffsetCommits.get(highWaterMark.offset); - } - } - - private void commitOffset( - long traceId, - long authorization, - long budgetId, - int reserved, - KafkaProxy proxy, - OffsetCommit offsetCommit) - { - proxy.doKafkaConsumerFlush(traceId, authorization, budgetId, reserved, offsetCommit); - } - private void onMqttData( DataFW data) { @@ -814,6 +784,36 @@ private void doMqttReset( } } + private void commitDeferredOffsets( + long traceId, + long authorization, + long budgetId, + int reserved, + OffsetHighWaterMark highWaterMark) + { + long offset = highWaterMark.offset; + DeferredOffsetCommit deferredOffsetCommit = highWaterMark.deferredOffsetCommits.get(offset); + + while (deferredOffsetCommit != null) + { + deferredOffsetCommit.commit(traceId, authorization, budgetId, reserved); + highWaterMark.deferredOffsetCommits.remove(offset); + offset = highWaterMark.increase(); + deferredOffsetCommit = highWaterMark.deferredOffsetCommits.get(highWaterMark.offset); + } + } + + private void commitOffset( + long traceId, + long authorization, + long budgetId, + int reserved, + KafkaProxy proxy, + OffsetCommit offsetCommit) + { + proxy.doKafkaConsumerFlush(traceId, authorization, budgetId, reserved, offsetCommit); + } + public int replyPendingAck() { return (int)(replySeq - replyAck); @@ -860,76 +860,6 @@ private KafkaMessagesBootstrap( this.replyId = supplyReplyId.applyAsLong(initialId); } - private void doKafkaBeginAt( - long timeMillis) - { - this.reconnectAt = signaler.signalAt( - timeMillis, - SIGNAL_CONNECT_BOOTSTRAP_STREAM, - this::onSignalConnectBootstrapStream); - } - - private void onSignalConnectBootstrapStream( - int signalId) - { - assert signalId == SIGNAL_CONNECT_BOOTSTRAP_STREAM; - - this.reconnectAt = NO_CANCEL_ID; - doKafkaBegin(supplyTraceId.get(), 0, 0); - } - - private void doKafkaBegin( - long traceId, - long 
authorization, - long affinity) - { - reconnectAttempt = 0; - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaBootstrapStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, - initialMax, traceId, authorization, affinity, topic, serverRef); - } - - private void doKafkaEnd( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); - - signaler.cancel(reconnectAt); - reconnectAt = NO_CANCEL_ID; - } - } - - private void doKafkaAbort( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); - } - } - - private void doKafkaWindow( - long traceId, - long authorization, - long budgetId, - int padding, - int capabilities) - { - replyMax = 8192; - - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, padding, 0, capabilities); - } - private void onKafkaMessage( int msgTypeId, DirectBuffer buffer, @@ -1065,6 +995,77 @@ private void onKafkaReset( this::onSignalConnectBootstrapStream); } } + + + private void doKafkaBeginAt( + long timeMillis) + { + this.reconnectAt = signaler.signalAt( + timeMillis, + SIGNAL_CONNECT_BOOTSTRAP_STREAM, + this::onSignalConnectBootstrapStream); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + reconnectAttempt = 0; + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaBootstrapStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, + initialMax, traceId, authorization, affinity, topic, serverRef); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + + signaler.cancel(reconnectAt); + reconnectAt = NO_CANCEL_ID; + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyMax = 8192; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, 0, capabilities); + } + + private void onSignalConnectBootstrapStream( + int signalId) + { + assert signalId == SIGNAL_CONNECT_BOOTSTRAP_STREAM; + + this.reconnectAt = NO_CANCEL_ID; + doKafkaBegin(supplyTraceId.get(), 0, 0); + } } abstract class KafkaProxy @@ -1126,249 +1127,47 @@ private KafkaMessagesProxy( this.incompletePacketIds = new Int2ObjectHashMap<>(); } - public boolean matchesTopicFilter( - String topicFilter) + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) { - return routeConfig.matches(topicFilter, MqttKafkaConditionKind.SUBSCRIBE); + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + 
onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } } - private void doKafkaBegin( - long traceId, - long authorization, - long affinity, - Array32FW filters) - { - if (!MqttKafkaState.initialOpening(state)) - { - final Array32FW.Builder filterBuilder = - filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity()); - - filters.forEach(f -> - { - if (matchesTopicFilter(f.pattern().asString())) - { - int subscriptionId = (int) f.subscriptionId(); - if (!messagesSubscriptionIds.contains(subscriptionId)) - { - messagesSubscriptionIds.add(subscriptionId); - } - filterBuilder.item(fb -> fb - .subscriptionId(subscriptionId).qos(f.qos()).flags(f.flags()).pattern(f.pattern())); - } - }); - - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, mqtt.clientId, topic, filterBuilder.build(), mqtt.qos, - KafkaOffsetType.LIVE); - } - } - - @Override - protected void doKafkaConsumerFlush( - long traceId, - long authorization, - long budgetId, - int reserved, - OffsetCommit offsetCommit) - { - final int qos = offsetCommit.qos; - final PartitionOffset offset = offsetCommit.partitionOffset; - final MqttOffsetStateFlags state = offsetCommit.state; - final int packetId = offsetCommit.packetId; - - - if (qos == MqttQoS.EXACTLY_ONCE.value() && state == MqttOffsetStateFlags.COMPLETE) - { - incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).removeInt(packetId); - } - else if (state == MqttOffsetStateFlags.INCOMPLETE) - { - incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).add(packetId); - } - - final int correlationId = state == MqttOffsetStateFlags.INCOMPLETE ? packetId : -1; - - final KafkaFlushExFW kafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.consumer(f -> - { - f.progress(p -> - { - p.partitionId(offset.partitionId).partitionOffset(offset.offset + 1); - final IntArrayList incomplete = incompletePacketIds.get(offset.partitionId); - final String16FW partitionMetadata = incomplete == null || incomplete.isEmpty() ? 
- EMPTY_STRING : offsetMetadataListToString(incomplete); - p.metadata(partitionMetadata); - }); - f.correlationId(correlationId); - })) - .build(); - - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, kafkaFlushEx); - } - - private void doKafkaFlush( - long traceId, - long authorization, - long budgetId, - int reserved, - int qos, - Array32FW filters) - { - initialSeq = mqtt.initialSeq; - - messagesSubscriptionIds.clear(); - - final KafkaFlushExFW kafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.fetch(f -> - { - f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); - filters.forEach(filter -> - { - if (matchesTopicFilter(filter.pattern().asString())) - { - final int subscriptionId = (int) filter.subscriptionId(); - if (!messagesSubscriptionIds.contains(subscriptionId)) - { - messagesSubscriptionIds.add(subscriptionId); - } - if ((filter.flags() & SEND_RETAIN_FLAG) != 0) - { - mqtt.retainAvailable = true; - } - f.filtersItem(fi -> - { - fi.conditionsItem(ci -> buildHeaders(ci, filter.pattern().asString())); - - final boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; - if (noLocal) - { - final DirectBuffer valueBuffer = mqtt.clientId.value(); - fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(helper.kafkaLocalHeaderName.sizeof()) - .name(helper.kafkaLocalHeaderName) - .valueLen(valueBuffer.capacity()) - .value(valueBuffer, 0, valueBuffer.capacity()))))); - } - - final int maxQos = filter.qos(); - if (maxQos != qos || maxQos == MqttQoS.EXACTLY_ONCE.value()) - { - for (int level = 0; level <= MqttQoS.EXACTLY_ONCE.value(); level++) - { - if (level != qos) - { - final DirectBuffer valueBuffer = qosNames.get(level).value(); - fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(helper.kafkaQosHeaderName.sizeof()) - .name(helper.kafkaQosHeaderName) - .valueLen(valueBuffer.capacity()) - .value(valueBuffer, 0, valueBuffer.capacity()))))); - } - } - } - else - { - for (int level = 0; level < maxQos; level++) - { - final DirectBuffer valueBuffer = qosNames.get(level).value(); - fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(helper.kafkaQosHeaderName.sizeof()) - .name(helper.kafkaQosHeaderName) - .valueLen(valueBuffer.capacity()) - .value(valueBuffer, 0, valueBuffer.capacity()))))); - } - } - }); - } - }); - })) - .build(); - - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, kafkaFlushEx); - } - - private void doKafkaEnd( - long traceId, - long authorization) - { - if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) - { - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } - - private void doKafkaAbort( - long traceId, - long authorization) - { - if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) - { - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } - - private void onKafkaMessage( - int 
msgTypeId, - DirectBuffer buffer, - int index, - int length) - { - switch (msgTypeId) - { - case BeginFW.TYPE_ID: - final BeginFW begin = beginRO.wrap(buffer, index, index + length); - onKafkaBegin(begin); - break; - case DataFW.TYPE_ID: - final DataFW data = dataRO.wrap(buffer, index, index + length); - onKafkaData(data); - break; - case EndFW.TYPE_ID: - final EndFW end = endRO.wrap(buffer, index, index + length); - onKafkaEnd(end); - break; - case AbortFW.TYPE_ID: - final AbortFW abort = abortRO.wrap(buffer, index, index + length); - onKafkaAbort(abort); - break; - case FlushFW.TYPE_ID: - final FlushFW flush = flushRO.wrap(buffer, index, index + length); - onKafkaFlush(flush); - break; - case WindowFW.TYPE_ID: - final WindowFW window = windowRO.wrap(buffer, index, index + length); - onKafkaWindow(window); - break; - case ResetFW.TYPE_ID: - final ResetFW reset = resetRO.wrap(buffer, index, index + length); - onKafkaReset(reset); - break; - } - } - - private void onKafkaBegin( - BeginFW begin) + private void onKafkaBegin( + BeginFW begin) { final long sequence = begin.sequence(); final long acknowledge = begin.acknowledge(); @@ -1601,77 +1400,24 @@ private void onKafkaData( } } - private void flushData( - long traceId, - long authorization, - long budgetId) + private void onKafkaFlush( + FlushFW flush) { - int length = Math.max(Math.min(mqtt.replyWindow() - mqtt.replyPad, messageSlotLimit - messageSlotOffset), 0); - int reserved = length + mqtt.replyPad; - if (length > 0) - { - final MutableDirectBuffer dataBuffer = bufferPool.buffer(dataSlot); - final MqttSubscribeMessageFW message = mqttSubscribeMessageRO.wrap(dataBuffer, messageSlotOffset, - dataBuffer.capacity()); - mqtt.doMqttData(traceId, authorization, budgetId, reserved, bufferedDataFlags, message.payload(), - message.extension()); - - messageSlotOffset += message.sizeof(); - if (messageSlotOffset == messageSlotLimit) - { - bufferPool.release(dataSlot); - dataSlot = NO_SLOT; - messageSlotLimit = 0; - messageSlotOffset = 0; - } - } - } - - private void cleanup( - long traceId, - long authorization) - { - mqtt.doMqttAbort(traceId, authorization); - doKafkaAbort(traceId, authorization); - } - - private void onKafkaEnd( - EndFW end) - { - final long sequence = end.sequence(); - final long acknowledge = end.acknowledge(); - final long traceId = end.traceId(); - final long authorization = end.authorization(); + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long budgetId = flush.budgetId(); + final int reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? 
extension.get(kafkaFlushExRO::tryWrap) : null; assert acknowledge <= sequence; assert sequence >= replySeq; - replySeq = sequence; - state = MqttKafkaState.closeReply(state); - - assert replyAck <= replySeq; - - mqtt.doMqttEnd(traceId, authorization); - } - - private void onKafkaFlush( - FlushFW flush) - { - final long sequence = flush.sequence(); - final long acknowledge = flush.acknowledge(); - final long traceId = flush.traceId(); - final long authorization = flush.authorization(); - final long budgetId = flush.budgetId(); - final int reserved = flush.reserved(); - final OctetsFW extension = flush.extension(); - final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); - final KafkaFlushExFW kafkaFlushEx = - flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; - - assert acknowledge <= sequence; - assert sequence >= replySeq; - - replySeq = sequence; + replySeq = sequence + reserved; assert replyAck <= replySeq; final KafkaMergedConsumerFlushExFW kafkaConsumerFlushEx = kafkaFlushEx != null && @@ -1708,6 +1454,25 @@ private void onKafkaFlush( } } + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + mqtt.doMqttEnd(traceId, authorization); + } + private void onKafkaAbort( AbortFW abort) { @@ -1727,6 +1492,23 @@ private void onKafkaAbort( mqtt.doMqttAbort(traceId, authorization); } + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= mqtt.initialAck; + + mqtt.initialAck = acknowledge; + + assert mqtt.initialAck <= mqtt.initialSeq; + + mqtt.doMqttReset(traceId); + } + private void onKafkaWindow( WindowFW window) { @@ -1752,21 +1534,200 @@ private void onKafkaWindow( mqtt.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); } - private void onKafkaReset( - ResetFW reset) + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + Array32FW filters) { - final long sequence = reset.sequence(); - final long acknowledge = reset.acknowledge(); - final long traceId = reset.traceId(); + if (!MqttKafkaState.initialOpening(state)) + { + final Array32FW.Builder filterBuilder = + filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity()); - assert acknowledge <= sequence; - assert acknowledge >= mqtt.initialAck; + filters.forEach(f -> + { + if (matchesTopicFilter(f.pattern().asString())) + { + int subscriptionId = (int) f.subscriptionId(); + if (!messagesSubscriptionIds.contains(subscriptionId)) + { + messagesSubscriptionIds.add(subscriptionId); + } + filterBuilder.item(fb -> fb + .subscriptionId(subscriptionId).qos(f.qos()).flags(f.flags()).pattern(f.pattern())); + } + }); - mqtt.initialAck = acknowledge; + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.openingInitial(state); - assert mqtt.initialAck <= mqtt.initialSeq; + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, mqtt.clientId, topic, filterBuilder.build(), mqtt.qos, + 
KafkaOffsetType.LIVE); + } + } - mqtt.doMqttReset(traceId); + @Override + protected void doKafkaConsumerFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + OffsetCommit offsetCommit) + { + final int qos = offsetCommit.qos; + final PartitionOffset offset = offsetCommit.partitionOffset; + final MqttOffsetStateFlags state = offsetCommit.state; + final int packetId = offsetCommit.packetId; + + + if (qos == MqttQoS.EXACTLY_ONCE.value() && state == MqttOffsetStateFlags.COMPLETE) + { + incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).removeInt(packetId); + } + else if (state == MqttOffsetStateFlags.INCOMPLETE) + { + incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).add(packetId); + } + + final int correlationId = state == MqttOffsetStateFlags.INCOMPLETE ? packetId : -1; + + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.consumer(f -> + { + f.progress(p -> + { + p.partitionId(offset.partitionId).partitionOffset(offset.offset + 1); + final IntArrayList incomplete = incompletePacketIds.get(offset.partitionId); + final String16FW partitionMetadata = incomplete == null || incomplete.isEmpty() ? + EMPTY_STRING : offsetMetadataListToString(incomplete); + p.metadata(partitionMetadata); + }); + f.correlationId(correlationId); + })) + .build(); + + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, kafkaFlushEx); + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + int qos, + Array32FW filters) + { + initialSeq = mqtt.initialSeq; + + messagesSubscriptionIds.clear(); + + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.fetch(f -> + { + f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + filters.forEach(filter -> + { + if (matchesTopicFilter(filter.pattern().asString())) + { + final int subscriptionId = (int) filter.subscriptionId(); + if (!messagesSubscriptionIds.contains(subscriptionId)) + { + messagesSubscriptionIds.add(subscriptionId); + } + if ((filter.flags() & SEND_RETAIN_FLAG) != 0) + { + mqtt.retainAvailable = true; + } + f.filtersItem(fi -> + { + fi.conditionsItem(ci -> buildHeaders(ci, filter.pattern().asString())); + + final boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; + if (noLocal) + { + final DirectBuffer valueBuffer = mqtt.clientId.value(); + fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(helper.kafkaLocalHeaderName.sizeof()) + .name(helper.kafkaLocalHeaderName) + .valueLen(valueBuffer.capacity()) + .value(valueBuffer, 0, valueBuffer.capacity()))))); + } + + final int maxQos = filter.qos(); + if (maxQos != qos || maxQos == MqttQoS.EXACTLY_ONCE.value()) + { + for (int level = 0; level <= MqttQoS.EXACTLY_ONCE.value(); level++) + { + if (level != qos) + { + final DirectBuffer valueBuffer = qosNames.get(level).value(); + fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(helper.kafkaQosHeaderName.sizeof()) + .name(helper.kafkaQosHeaderName) + .valueLen(valueBuffer.capacity()) + .value(valueBuffer, 0, valueBuffer.capacity()))))); + } + } + } + else + { + for (int level = 0; level < maxQos; level++) + { + final DirectBuffer valueBuffer = qosNames.get(level).value(); + 
fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(helper.kafkaQosHeaderName.sizeof()) + .name(helper.kafkaQosHeaderName) + .valueLen(valueBuffer.capacity()) + .value(valueBuffer, 0, valueBuffer.capacity()))))); + } + } + }); + } + }); + })) + .build(); + + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, kafkaFlushEx); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) + { + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) + { + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } } private void doKafkaReset( @@ -1815,7 +1776,7 @@ private void doKafkaWindow( } } - public void flushDataIfNecessary( + private void flushDataIfNecessary( long traceId, long authorization, long budgetId) @@ -1825,6 +1786,46 @@ public void flushDataIfNecessary( flushData(traceId, authorization, budgetId); } } + + private void flushData( + long traceId, + long authorization, + long budgetId) + { + int length = Math.max(Math.min(mqtt.replyWindow() - mqtt.replyPad, messageSlotLimit - messageSlotOffset), 0); + int reserved = length + mqtt.replyPad; + if (length > 0) + { + final MutableDirectBuffer dataBuffer = bufferPool.buffer(dataSlot); + final MqttSubscribeMessageFW message = mqttSubscribeMessageRO.wrap(dataBuffer, messageSlotOffset, + dataBuffer.capacity()); + mqtt.doMqttData(traceId, authorization, budgetId, reserved, bufferedDataFlags, message.payload(), + message.extension()); + + messageSlotOffset += message.sizeof(); + if (messageSlotOffset == messageSlotLimit) + { + bufferPool.release(dataSlot); + dataSlot = NO_SLOT; + messageSlotLimit = 0; + messageSlotOffset = 0; + } + } + } + + private boolean matchesTopicFilter( + String topicFilter) + { + return routeConfig.matches(topicFilter, MqttKafkaConditionKind.SUBSCRIBE); + } + + private void cleanup( + long traceId, + long authorization) + { + mqtt.doMqttAbort(traceId, authorization); + doKafkaAbort(traceId, authorization); + } } private IntArrayList stringToOffsetMetadataList( @@ -1832,7 +1833,7 @@ private IntArrayList stringToOffsetMetadataList( { final IntArrayList metadataList = new IntArrayList(); UnsafeBuffer buffer = new UnsafeBuffer(BitUtil.fromHex(metadata.asString())); - final MqttOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRO.wrap(buffer, 0, buffer.capacity()); + final MqttSubscribeOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRO.wrap(buffer, 0, buffer.capacity()); offsetMetadata.packetIds().forEachRemaining((IntConsumer) metadataList::add); return metadataList; } @@ -1843,7 +1844,7 @@ private String16FW offsetMetadataListToString( mqttOffsetMetadataRW.wrap(offsetBuffer, 0, offsetBuffer.capacity()); mqttOffsetMetadataRW.version(OFFSET_METADATA_VERSION); metadataList.forEach(p -> mqttOffsetMetadataRW.appendPacketIds(p.shortValue())); - final MqttOffsetMetadataFW 
offsetMetadata = mqttOffsetMetadataRW.build(); + final MqttSubscribeOffsetMetadataFW offsetMetadata = mqttOffsetMetadataRW.build(); return new String16FW(BitUtil.toHex(offsetMetadata.buffer().byteArray(), offsetMetadata.offset(), offsetMetadata.limit())); } @@ -1871,192 +1872,24 @@ final class KafkaRetainedProxy extends KafkaProxy private int replyMax; private int replyPad; - private int unAckedPackets; - private boolean expiredMessage; - - private KafkaRetainedProxy( - long originId, - long routedId, - String16FW topic, - MqttSubscribeProxy mqtt) - { - this.originId = originId; - this.routedId = routedId; - this.topic = topic; - this.topicKey = System.identityHashCode(topic.asString().intern()); - this.mqtt = mqtt; - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); - this.incompletePacketIds = new Int2ObjectHashMap<>(); - this.unAckedPackets = 0; - } - - private void doKafkaBegin( - long traceId, - long authorization, - long affinity, - List newRetainedFilters) - { - state = 0; - replySeq = 0; - replyAck = 0; - replyMax = 0; - - final Array32FW.Builder filterBuilder = - filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity()); - - newRetainedFilters.forEach(f -> - { - final int subscriptionId = f.id; - if (!mqtt.retainedSubscriptionIds.contains(subscriptionId)) - { - mqtt.retainedSubscriptionIds.add(subscriptionId); - } - filterBuilder.item(fb -> fb - .subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter)); - final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0; - mqtt.retainAsPublished.put(f.id, rap); - }); - mqtt.retainedSubscriptions.addAll(newRetainedFilters); - - Array32FW retainedFilters = filterBuilder.build(); - - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - - state = MqttKafkaState.openingInitial(state); - - kafka = - newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, mqtt.clientId, topic, retainedFilters, mqtt.qos, - KafkaOffsetType.HISTORICAL); - } - - @Override - protected void doKafkaConsumerFlush( - long traceId, - long authorization, - long budgetId, - int reserved, - OffsetCommit offsetCommit) - { - final int qos = offsetCommit.qos; - final PartitionOffset offset = offsetCommit.partitionOffset; - final MqttOffsetStateFlags state = offsetCommit.state; - final int packetId = offsetCommit.packetId; - - if (qos == MqttQoS.EXACTLY_ONCE.value() && state == MqttOffsetStateFlags.COMPLETE) - { - final IntArrayList incompletes = incompletePacketIds.get(offset.partitionId); - incompletes.removeInt(packetId); - if (incompletes.isEmpty()) - { - incompletePacketIds.remove(offset.partitionId); - } - } - - if (state == MqttOffsetStateFlags.INCOMPLETE) - { - incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).add(packetId); - } - - final int correlationId = state == MqttOffsetStateFlags.INCOMPLETE ? packetId : -1; - - final KafkaFlushExFW kafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.consumer(f -> - { - f.progress(p -> - { - p.partitionId(offset.partitionId).partitionOffset(offset.offset + 1); - final IntArrayList incomplete = incompletePacketIds.get(offset.partitionId); - final String16FW partitionMetadata = incomplete == null || incomplete.isEmpty() ? 
- EMPTY_STRING : offsetMetadataListToString(incomplete); - p.metadata(partitionMetadata); - }); - f.correlationId(correlationId); - })) - .build(); - - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, kafkaFlushEx); - } - - private void doKafkaFlush( - long traceId, - long authorization, - long budgetId, - int reserved, - int qos, - List retainedFiltersList) - { - initialSeq = mqtt.initialSeq; - - final Array32FW.Builder filterBuilder = - filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity()); - - retainedFiltersList.forEach(f -> - { - final int subscriptionId = f.id; - if (!mqtt.retainedSubscriptionIds.contains(subscriptionId)) - { - mqtt.retainedSubscriptionIds.add(subscriptionId); - } - filterBuilder.item(fb -> fb - .subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter)); - final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0; - mqtt.retainAsPublished.put(f.id, rap); - }); - - Array32FW retainedFilters = filterBuilder.build(); - - final KafkaFlushExFW retainedKafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m.fetch(f -> - { - f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); - retainedFilters.forEach(filter -> - f.filtersItem(fi -> - fi.conditionsItem(ci -> - buildHeaders(ci, filter.pattern().asString())))); - })) - .build(); - - doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, retainedKafkaFlushEx); - } - - private void doKafkaEnd( - long traceId, - long authorization) - { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - state = MqttKafkaState.closeInitial(state); - - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } - } + private int unAckedPackets; + private boolean expiredMessage; - private void doKafkaAbort( - long traceId, - long authorization) + private KafkaRetainedProxy( + long originId, + long routedId, + String16FW topic, + MqttSubscribeProxy mqtt) { - if (!MqttKafkaState.initialClosed(state)) - { - initialSeq = mqtt.initialSeq; - initialAck = mqtt.initialAck; - initialMax = mqtt.initialMax; - state = MqttKafkaState.closeInitial(state); - - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); - } + this.originId = originId; + this.routedId = routedId; + this.topic = topic; + this.topicKey = System.identityHashCode(topic.asString().intern()); + this.mqtt = mqtt; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.incompletePacketIds = new Int2ObjectHashMap<>(); + this.unAckedPackets = 0; } private void onKafkaMessage( @@ -2300,25 +2133,6 @@ private void onKafkaData( } } - private void onKafkaEnd( - EndFW end) - { - final long sequence = end.sequence(); - final long acknowledge = end.acknowledge(); - final long traceId = end.traceId(); - final long authorization = end.authorization(); - - assert acknowledge <= sequence; - assert sequence >= replySeq; - - replySeq = sequence; - state = MqttKafkaState.closeReply(state); - - assert replyAck <= replySeq; - - mqtt.messages.values().forEach(m -> m.flushData(traceId, authorization, mqtt.replyBud)); - } - private void onKafkaFlush( FlushFW flush) { @@ -2337,7 +2151,7 @@ 
private void onKafkaFlush( assert acknowledge <= sequence; assert sequence >= replySeq; - replySeq = sequence; + replySeq = sequence + reserved; assert replyAck <= replySeq; @@ -2378,6 +2192,25 @@ private void onKafkaFlush( } } + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + mqtt.messages.values().forEach(m -> m.flushData(traceId, authorization, mqtt.replyBud)); + } + private void onKafkaAbort( AbortFW abort) { @@ -2397,6 +2230,23 @@ private void onKafkaAbort( mqtt.doMqttAbort(traceId, authorization); } + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= mqtt.initialAck; + + mqtt.initialAck = acknowledge; + + assert mqtt.initialAck <= mqtt.initialSeq; + + mqtt.doMqttReset(traceId); + } + private void onKafkaWindow( WindowFW window) { @@ -2422,21 +2272,172 @@ private void onKafkaWindow( mqtt.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); } - private void onKafkaReset( - ResetFW reset) + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + List newRetainedFilters) { - final long sequence = reset.sequence(); - final long acknowledge = reset.acknowledge(); - final long traceId = reset.traceId(); + state = 0; + replySeq = 0; + replyAck = 0; + replyMax = 0; - assert acknowledge <= sequence; - assert acknowledge >= mqtt.initialAck; + final Array32FW.Builder filterBuilder = + filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity()); - mqtt.initialAck = acknowledge; + newRetainedFilters.forEach(f -> + { + final int subscriptionId = f.id; + if (!mqtt.retainedSubscriptionIds.contains(subscriptionId)) + { + mqtt.retainedSubscriptionIds.add(subscriptionId); + } + filterBuilder.item(fb -> fb + .subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter)); + final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0; + mqtt.retainAsPublished.put(f.id, rap); + }); + mqtt.retainedSubscriptions.addAll(newRetainedFilters); - assert mqtt.initialAck <= mqtt.initialSeq; + Array32FW retainedFilters = filterBuilder.build(); - mqtt.doMqttReset(traceId); + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + + state = MqttKafkaState.openingInitial(state); + + kafka = + newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, mqtt.clientId, topic, retainedFilters, mqtt.qos, + KafkaOffsetType.HISTORICAL); + } + + @Override + protected void doKafkaConsumerFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + OffsetCommit offsetCommit) + { + final int qos = offsetCommit.qos; + final PartitionOffset offset = offsetCommit.partitionOffset; + final MqttOffsetStateFlags state = offsetCommit.state; + final int packetId = offsetCommit.packetId; + + if (qos == MqttQoS.EXACTLY_ONCE.value() && state == MqttOffsetStateFlags.COMPLETE) + { + final IntArrayList incompletes = incompletePacketIds.get(offset.partitionId); + incompletes.removeInt(packetId); + if (incompletes.isEmpty()) + { + 
incompletePacketIds.remove(offset.partitionId);
+                }
+            }
+
+            if (state == MqttOffsetStateFlags.INCOMPLETE)
+            {
+                incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).add(packetId);
+            }
+
+            final int correlationId = state == MqttOffsetStateFlags.INCOMPLETE ? packetId : -1;
+
+            final KafkaFlushExFW kafkaFlushEx =
+                kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity())
+                    .typeId(kafkaTypeId)
+                    .merged(m -> m.consumer(f ->
+                    {
+                        f.progress(p ->
+                        {
+                            p.partitionId(offset.partitionId).partitionOffset(offset.offset + 1);
+                            final IntArrayList incomplete = incompletePacketIds.get(offset.partitionId);
+                            final String16FW partitionMetadata = incomplete == null || incomplete.isEmpty() ?
+                                EMPTY_STRING : offsetMetadataListToString(incomplete);
+                            p.metadata(partitionMetadata);
+                        });
+                        f.correlationId(correlationId);
+                    }))
+                    .build();
+
+            doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+                traceId, authorization, budgetId, reserved, kafkaFlushEx);
+        }
+
+        private void doKafkaFlush(
+            long traceId,
+            long authorization,
+            long budgetId,
+            int reserved,
+            int qos,
+            List retainedFiltersList)
+        {
+            initialSeq = mqtt.initialSeq;
+
+            final Array32FW.Builder filterBuilder =
+                filtersRW.wrap(filterBuffer, 0, filterBuffer.capacity());
+
+            retainedFiltersList.forEach(f ->
+            {
+                final int subscriptionId = f.id;
+                if (!mqtt.retainedSubscriptionIds.contains(subscriptionId))
+                {
+                    mqtt.retainedSubscriptionIds.add(subscriptionId);
+                }
+                filterBuilder.item(fb -> fb
+                    .subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter));
+                final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0;
+                mqtt.retainAsPublished.put(f.id, rap);
+            });
+
+            Array32FW retainedFilters = filterBuilder.build();
+
+            final KafkaFlushExFW retainedKafkaFlushEx =
+                kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity())
+                    .typeId(kafkaTypeId)
+                    .merged(m -> m.fetch(f ->
+                    {
+                        f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY));
+                        retainedFilters.forEach(filter ->
+                            f.filtersItem(fi ->
+                                fi.conditionsItem(ci ->
+                                    buildHeaders(ci, filter.pattern().asString()))));
+                    }))
+                    .build();
+
+            doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+                traceId, authorization, budgetId, reserved, retainedKafkaFlushEx);
+        }
+
+        private void doKafkaEnd(
+            long traceId,
+            long authorization)
+        {
+            if (!MqttKafkaState.initialClosed(state))
+            {
+                initialSeq = mqtt.initialSeq;
+                initialAck = mqtt.initialAck;
+                initialMax = mqtt.initialMax;
+                state = MqttKafkaState.closeInitial(state);
+
+                doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization);
+            }
+        }
+
+        private void doKafkaAbort(
+            long traceId,
+            long authorization)
+        {
+            if (!MqttKafkaState.initialClosed(state))
+            {
+                initialSeq = mqtt.initialSeq;
+                initialAck = mqtt.initialAck;
+                initialMax = mqtt.initialMax;
+                state = MqttKafkaState.closeInitial(state);
+
+                doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization);
+            }
         }
 
         private void doKafkaReset(
diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java
index d1573b664c..fccc240c61 100644
--- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java
+++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java
@@ -35,6 +35,7 @@ public class MqttKafkaConfigurationTest
     public static final String WILL_STREAM_RECONNECT_DELAY_NAME = "zilla.binding.mqtt.kafka.will.stream.reconnect";
     public static final String BOOTSTRAP_AVAILABLE_NAME = "zilla.binding.mqtt.kafka.bootstrap.available";
     public static final String BOOTSTRAP_STREAM_RECONNECT_DELAY_NAME = "zilla.binding.mqtt.kafka.bootstrap.stream.reconnect";
+    public static final String PUBLISH_MAX_QOS_NAME = "zilla.binding.mqtt.kafka.publish.max.qos";
     public static final String SESSION_ID_NAME = "zilla.binding.mqtt.kafka.session.id";
     public static final String WILL_ID_NAME = "zilla.binding.mqtt.kafka.will.id";
     public static final String LIFETIME_ID_NAME = "zilla.binding.mqtt.kafka.lifetime.id";
diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java
index f39c48a682..6e169835a7 100644
--- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java
+++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java
@@ -15,6 +15,8 @@
 package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream;
 
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.BOOTSTRAP_AVAILABLE_NAME;
+import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.INSTANCE_ID_NAME;
+import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_ID_NAME;
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME;
 import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY;
 import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME;
@@ -45,6 +47,10 @@ public class MqttKafkaPublishProxyIT
         .directory("target/zilla-itests")
         .countersBufferCapacity(8192)
         .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192)
+        .configure(SESSION_ID_NAME,
+            "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId")
+        .configure(INSTANCE_ID_NAME,
+            "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId")
         .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config")
         .external("kafka0")
         .clean();
@@ -85,17 +91,6 @@ public void shouldReceiveServerSentAbort() throws Exception
         k3po.finish();
     }
 
-    @Test
-    @Configuration("proxy.yaml")
-    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
-    @Specification({
-        "${mqtt}/publish.server.sent.flush/client",
-        "${kafka}/publish.server.sent.flush/server"})
-    public void shouldReceiveServerSentFlush() throws Exception
-    {
-        k3po.finish();
-    }
-
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
@@ -129,17 +124,6 @@ public void shouldPublishRetainedThenReceiveServerSentAbort() throws Exception
         k3po.finish();
     }
 
-    @Test
-    @Configuration("proxy.yaml")
-    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
-    @Specification({
-        "${mqtt}/publish.retained.server.sent.flush/client",
-        "${kafka}/publish.retained.server.sent.flush/server"})
-    public void shouldPublishRetainedThenReceiveServerSentFlush() throws Exception
-    {
-        k3po.finish();
-    }
-
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
@@ -306,6 +290,83 @@ public void shouldSendMessageQos2() throws Exception
         k3po.finish();
     }
 
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.retained/client",
+        "${kafka}/publish.qos2.retained/server"})
+    public void shouldSendMessageQos2Retained() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.recovery/client",
+        "${kafka}/publish.qos2.recovery/server"})
+    public void shouldSendMessageQos2DuringRecovery() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.abort/client",
+        "${kafka}/publish.qos2.meta.abort/server"})
+    public void shouldSessionReceiveQos2MetaSentAbort() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.abort/client",
+        "${kafka}/publish.qos2.offset.fetch.abort/server"})
+    public void shouldSessionReceiveQos2OffsetFetchSentAbort() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.abort/client",
+        "${kafka}/publish.qos2.init.producer.abort/server"})
+    public void shouldSessionReceiveQos2InitProducerSentAbort() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.offset.commit.abort.phase1/client",
+        "${kafka}/publish.qos2.offset.commit.abort.phase1/server"})
+    public void shouldPublishReceiveQos2OffsetCommitSentAbort() throws Exception
+    {
+        k3po.finish();
+    }
+
+    @Test
+    @Configuration("proxy.yaml")
+    @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Specification({
+        "${mqtt}/publish.qos2.offset.commit.abort.phase2/client",
+        "${kafka}/publish.qos2.offset.commit.abort.phase2/server"})
+    public void shouldSessionReceiveQos2OffsetCommitSentAbort() throws Exception
+    {
+        k3po.finish();
+    }
+
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
index ca16c879ac..945b86d372 100644
--- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
+++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
@@ -16,6 +16,7 @@
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.INSTANCE_ID_NAME;
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.LIFETIME_ID_NAME;
+import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.PUBLISH_MAX_QOS_NAME;
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_ID_NAME;
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.TIME_NAME;
 import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME;
@@ -72,6 +73,7 @@ public class MqttKafkaSessionProxyIT
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.connect.override.max.session.expiry/client",
         "${kafka}/session.connect.override.max.session.expiry/server"})
@@ -83,6 +85,7 @@ public void shouldConnectServerOverridesSessionExpiryTooBig() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.connect.override.min.session.expiry/client",
         "${kafka}/session.connect.override.min.session.expiry/server"})
@@ -94,6 +97,7 @@ public void shouldConnectServerOverridesSessionExpiryTooSmall() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.abort.reconnect.non.clean.start/client",
         "${kafka}/session.abort.reconnect.non.clean.start/server"})
@@ -105,6 +109,7 @@ public void shouldReconnectNonCleanStart() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.client.takeover/client",
         "${kafka}/session.client.takeover/server"})
@@ -116,6 +121,7 @@ public void shouldTakeOverSession() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.exists.clean.start/client",
         "${kafka}/session.exists.clean.start/server"})
@@ -127,6 +133,7 @@ public void shouldRemoveSessionAtCleanStart() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.subscribe/client",
         "${kafka}/session.subscribe/server"})
@@ -138,6 +146,7 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.subscribe.via.session.state/client",
         "${kafka}/session.subscribe.via.session.state/server"})
@@ -149,6 +158,7 @@ public void shouldReceiveMessageSubscribedViaSessionState() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.unsubscribe.after.subscribe/client",
         "${kafka}/session.unsubscribe.after.subscribe/server"})
@@ -160,6 +170,7 @@ public void shouldUnsubscribeAndUpdateSessionState() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.unsubscribe.via.session.state/client",
         "${kafka}/session.unsubscribe.via.session.state/server"})
@@ -171,6 +182,7 @@ public void shouldUnsubscribeViaSessionState() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.client.sent.reset/client",
         "${kafka}/session.client.sent.reset/server"})
@@ -182,6 +194,7 @@ public void shouldSessionStreamReceiveClientSentReset() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.server.sent.reset/client",
         "${kafka}/session.server.sent.reset/server"})
@@ -193,6 +206,7 @@ public void shouldSessionStreamReceiveServerSentReset() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.server.sent.reset/client",
         "${kafka}/session.group.server.sent.reset/server"})
@@ -205,6 +219,7 @@ public void shouldGroupStreamReceiveServerSentReset() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.group.reset.not.authorized/client",
         "${kafka}/session.group.reset.not.authorized/server"})
@@ -217,6 +232,7 @@ public void shouldGroupStreamReceiveResetNotAuthorized() throws Exception
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.group.reset.invalid.session.timeout/client",
         "${kafka}/session.group.reset.invalid.session.timeout/server"})
@@ -229,6 +245,7 @@ public void shouldGroupStreamReceiveResetInvalidSessionTimeout() throws Exceptio
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.group.reset.invalid.describe.config/client",
         "${kafka}/session.group.reset.invalid.describe.config/server"})
@@ -240,6 +257,7 @@ public void shouldGroupStreamReceiveResetInvalidDescribeConfig() throws Exceptio
     @Test
     @Configuration("proxy.yaml")
     @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Configure(name = SESSION_ID_NAME,
         value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId")
     @Specification({
@@ -252,6 +270,7 @@ public void shouldRedirect() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.close.expire.session.state/client",
         "${kafka}/session.close.expire.session.state/server"})
@@ -262,6 +281,7 @@ public void shouldExpireSessionOnClose() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.abort.expire.session.state/client",
         "${kafka}/session.abort.expire.session.state/server"})
@@ -272,6 +292,7 @@ public void shouldExpireSessionOnAbort() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${kafka}/session.cancel.session.expiry/server"})
     public void shouldCancelSessionExpiry() throws Exception
@@ -281,6 +302,7 @@ public void shouldCancelSessionExpiry() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${kafka}/session.session.expiry.fragmented/server"})
     public void shouldDecodeSessionExpirySignalFragmented() throws Exception
@@ -290,6 +312,7 @@ public void shouldDecodeSessionExpirySignalFragmented() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${kafka}/session.expiry.after.signal.stream.restart/server"})
     public void shouldExpireSessionAfterSignalStreamRestart() throws Exception
@@ -299,6 +322,7 @@ public void shouldExpireSessionAfterSignalStreamRestart() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.normal.disconnect/client",
         "${kafka}/session.will.message.normal.disconnect/server"})
@@ -309,6 +333,7 @@ public void shouldNotSendWillMessageOnNormalDisconnect() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.clean.start/client",
         "${kafka}/session.will.message.clean.start/server"})
@@ -319,6 +344,7 @@ public void shouldGenerateLifeTimeIdOnCleanStart() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.abort.deliver.will/client",
         "${kafka}/session.will.message.abort.deliver.will/server"})
@@ -329,6 +355,7 @@ public void shouldSendWillMessageOnAbort() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.10k.abort.deliver.will/client",
         "${kafka}/session.will.message.10k.abort.deliver.will/server"})
@@ -339,6 +366,7 @@ public void shouldSendWillMessage10kOnAbort() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.abort.deliver.will/client",
         "${kafka}/session.will.message.will.id.mismatch.skip.delivery/server"})
@@ -349,6 +377,7 @@ public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.abort.deliver.will.retain/client",
         "${kafka}/session.will.message.abort.deliver.will.retain/server"})
@@ -359,6 +388,7 @@ public void shouldSaveWillMessageAsRetain() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${mqtt}/session.will.message.takeover.deliver.will/client",
         "${kafka}/session.will.message.takeover.deliver.will/server"})
@@ -369,6 +399,7 @@ public void shouldDeliverWillMessageOnSessionTakeover() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Specification({
         "${kafka}/session.will.message.cancel.delivery/server"})
     public void shouldCancelWillDelivery() throws Exception
@@ -381,6 +412,7 @@ public void shouldCancelWillDelivery() throws Exception
 
     @Test
     @Configuration("proxy.yaml")
+    @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1")
     @Configure(name = WILL_STREAM_RECONNECT_DELAY_NAME, value = "1")
@Specification({ "${kafka}/session.will.stream.end.reconnect/server"}) @@ -391,6 +423,7 @@ public void shouldReconnectWillStreamOnKafkaEnd() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1") @Configure(name = WILL_STREAM_RECONNECT_DELAY_NAME, value = "1") @Specification({ "${kafka}/session.will.stream.abort.reconnect/server"}) @@ -401,6 +434,7 @@ public void shouldReconnectWillStreamOnKafkaAbort() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = PUBLISH_MAX_QOS_NAME, value = "1") @Configure(name = WILL_STREAM_RECONNECT_DELAY_NAME, value = "1") @Specification({ "${kafka}/session.will.stream.reset.reconnect/server"}) diff --git a/runtime/binding-mqtt/pom.xml b/runtime/binding-mqtt/pom.xml index 25a01c1646..691257bb8d 100644 --- a/runtime/binding-mqtt/pom.xml +++ b/runtime/binding-mqtt/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfig.java index a932bf4f0e..4bc03b19a3 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfig.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfig.java @@ -15,15 +15,21 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.config; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; + import java.util.List; +import java.util.Objects; import java.util.function.Function; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttVersion; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public class MqttOptionsConfig extends OptionsConfig { public final MqttAuthorizationConfig authorization; public final List topics; + public final List versions; public static MqttOptionsConfigBuilder builder() { @@ -38,9 +44,17 @@ public static MqttOptionsConfigBuilder builder( public MqttOptionsConfig( MqttAuthorizationConfig authorization, - List topics) + List topics, + List versions) { + super(topics != null && !topics.isEmpty() + ? 
topics.stream() + .map(t -> t.content) + .filter(Objects::nonNull) + .collect(toList()) + : emptyList()); this.authorization = authorization; this.topics = topics; + this.versions = versions; } } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfigBuilder.java index b17f237ff1..9d0dbd3f37 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfigBuilder.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttOptionsConfigBuilder.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.function.Function; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttVersion; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; @@ -28,6 +29,7 @@ public class MqttOptionsConfigBuilder extends ConfigBuilder topics; + private List versions; MqttOptionsConfigBuilder( Function mapper) @@ -65,6 +67,28 @@ public MqttOptionsConfigBuilder topic( return this; } + public MqttOptionsConfigBuilder versions( + List versions) + { + if (versions == null) + { + versions = new LinkedList<>(); + } + this.versions = versions; + return this; + } + + public MqttOptionsConfigBuilder version( + MqttVersion version) + { + if (this.versions == null) + { + this.versions = new LinkedList<>(); + } + this.versions.add(version); + return this; + } + public MqttTopicConfigBuilder> topic() { return new MqttTopicConfigBuilder<>(this::topic); @@ -85,6 +109,6 @@ public MqttAuthorizationConfigBuilder> authorization @Override public T build() { - return mapper.apply(new MqttOptionsConfig(authorization, topics)); + return mapper.apply(new MqttOptionsConfig(authorization, topics, versions)); } } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfig.java index bcba8eb11f..586ee4e318 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfig.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfig.java @@ -17,16 +17,16 @@ import static java.util.function.Function.identity; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; public class MqttTopicConfig { public final String name; - public final ValidatorConfig content; + public final ModelConfig content; public MqttTopicConfig( String name, - ValidatorConfig content) + ModelConfig content) { this.name = name; this.content = content; diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfigBuilder.java index 382d56b951..4a4dd1c372 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfigBuilder.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttTopicConfigBuilder.java @@ -18,14 +18,14 @@ import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import 
io.aklivity.zilla.runtime.engine.config.ModelConfig; public class MqttTopicConfigBuilder extends ConfigBuilder> { private final Function mapper; private String name; - private ValidatorConfig content; + private ModelConfig content; MqttTopicConfigBuilder( Function mapper) @@ -48,14 +48,14 @@ public MqttTopicConfigBuilder name( } public MqttTopicConfigBuilder content( - ValidatorConfig content) + ModelConfig content) { this.content = content; return this; } public , C>> C content( - Function>, C> content) + Function>, C> content) { return content.apply(this::content); } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java index bc85f1747e..a29b96e302 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java @@ -16,10 +16,6 @@ package io.aklivity.zilla.runtime.binding.mqtt.internal; import java.net.URL; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import org.agrona.collections.IntArrayList; import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.Binding; @@ -31,13 +27,11 @@ public final class MqttBinding implements Binding private final MqttConfiguration config; - private final ConcurrentMap unreleasedPacketIdsByClientId; MqttBinding( MqttConfiguration config) { this.config = config; - this.unreleasedPacketIdsByClientId = new ConcurrentHashMap<>(); } @Override @@ -70,6 +64,6 @@ public String routedType( public MqttBindingContext supply( EngineContext context) { - return new MqttBindingContext(config, context, unreleasedPacketIdsByClientId); + return new MqttBindingContext(config, context); } } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java index 20ab283eea..3b45fb60e5 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java @@ -20,9 +20,6 @@ import java.util.EnumMap; import java.util.Map; -import java.util.concurrent.ConcurrentMap; - -import org.agrona.collections.IntArrayList; import io.aklivity.zilla.runtime.binding.mqtt.internal.stream.MqttClientFactory; import io.aklivity.zilla.runtime.binding.mqtt.internal.stream.MqttServerFactory; @@ -40,11 +37,10 @@ final class MqttBindingContext implements BindingContext MqttBindingContext( MqttConfiguration config, - EngineContext context, - ConcurrentMap unreleasedPacketIdsByClientId) + EngineContext context) { final EnumMap factories = new EnumMap<>(KindConfig.class); - factories.put(SERVER, new MqttServerFactory(config, context, unreleasedPacketIdsByClientId)); + factories.put(SERVER, new MqttServerFactory(config, context)); factories.put(CLIENT, new MqttClientFactory(config, context)); this.factories = factories; } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java index 3ea0e7e6b1..875be68ee9 
100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java @@ -17,6 +17,7 @@ import static java.util.stream.Collectors.toList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -32,11 +33,13 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.guard.GuardHandler; public final class MqttBindingConfig { private static final Function DEFAULT_CREDENTIALS = x -> null; + private static final List DEFAULT_VERSIONS = Arrays.asList(MqttVersion.V3_1_1, MqttVersion.V_5); public final long id; public final String name; @@ -44,8 +47,10 @@ public final class MqttBindingConfig public final MqttOptionsConfig options; public final List routes; public final Function credentials; - public final Map topics; + public final Map topics; + public final List versions; public final ToLongFunction resolveId; + public final GuardHandler guard; public MqttBindingConfig( BindingConfig binding, @@ -62,8 +67,10 @@ public MqttBindingConfig( this.topics = options != null && options.topics != null ? options.topics.stream() - .collect(Collectors.toMap(t -> t.name, - t -> context.createValidator(t.content, resolveId))) : null; + .collect(Collectors.toMap(t -> t.name, t -> t.content)) : null; + this.guard = resolveGuard(context); + this.versions = options != null && + options.versions != null ? options.versions : DEFAULT_VERSIONS; } public MqttRouteConfig resolve( @@ -105,6 +112,12 @@ public MqttRouteConfig resolvePublish( .orElse(null); } + public ModelConfig supplyModelConfig( + String topic) + { + return topics != null ? 
topics.getOrDefault(topic, null) : null; + } + public Function credentials() { return credentials; @@ -116,6 +129,21 @@ public MqttConnectProperty authField() options.authorization.credentials.connect.get(0).property : null; } + private GuardHandler resolveGuard( + EngineContext context) + { + GuardHandler guard = null; + + if (options != null && + options.authorization != null) + { + long guardId = resolveId.applyAsLong(options.authorization.name); + guard = context.supplyGuard(guardId); + } + + return guard; + } + private Function asAccessor( MqttCredentialsConfig credentials) { diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java index a6bcda25ea..944e551b50 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java @@ -43,6 +43,7 @@ public class MqttOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbA private static final String AUTHORIZATION_CREDENTIALS_NAME = "credentials"; private static final String AUTHORIZATION_CREDENTIALS_CONNECT_NAME = "connect"; private static final String TOPICS_NAME = "topics"; + private static final String VERSIONS_NAME = "versions"; private final MqttTopicConfigAdapter mqttTopic = new MqttTopicConfigAdapter(); @@ -104,6 +105,13 @@ public JsonObject adaptToJson( object.add(TOPICS_NAME, topics); } + if (mqttOptions.versions != null) + { + JsonArrayBuilder versions = Json.createArrayBuilder(); + mqttOptions.versions.forEach(v -> versions.add(v.specification())); + object.add(VERSIONS_NAME, versions); + } + return object.build(); } @@ -153,6 +161,14 @@ public OptionsConfig adaptFromJson( mqttOptions.topics(topics); } + if (object.containsKey(VERSIONS_NAME)) + { + List versions = object.getJsonArray(VERSIONS_NAME).stream() + .map(item -> MqttVersion.ofSpecification(((JsonString) item).getString())) + .collect(Collectors.toList()); + mqttOptions.versions(versions); + } + return mqttOptions.build(); } } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttTopicConfigAdapter.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttTopicConfigAdapter.java index ba00810967..2105c49863 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttTopicConfigAdapter.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttTopicConfigAdapter.java @@ -23,14 +23,14 @@ import io.aklivity.zilla.runtime.binding.mqtt.config.MqttTopicConfig; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttTopicConfigBuilder; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapter; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapter; public class MqttTopicConfigAdapter implements JsonbAdapter { private static final String NAME_NAME = "name"; private static final String CONTENT_NAME = "content"; - private final ValidatorConfigAdapter validator = new ValidatorConfigAdapter(); + private final ModelConfigAdapter model = new ModelConfigAdapter(); @Override public JsonObject adaptToJson( @@ -44,8 +44,8 @@ public JsonObject adaptToJson( if (topic.content != null) { - 
validator.adaptType(topic.content.type); - JsonValue content = validator.adaptToJson(topic.content); + model.adaptType(topic.content.model); + JsonValue content = model.adaptToJson(topic.content); object.add(CONTENT_NAME, content); } @@ -65,7 +65,7 @@ public MqttTopicConfig adaptFromJson( if (object.containsKey(CONTENT_NAME)) { JsonValue contentJson = object.get(CONTENT_NAME); - mqttTopic.content(validator.adaptFromJson(contentJson)); + mqttTopic.content(model.adaptFromJson(contentJson)); } return mqttTopic.build(); } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttVersion.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttVersion.java new file mode 100644 index 0000000000..c75f654395 --- /dev/null +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttVersion.java @@ -0,0 +1,72 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.config; +public enum MqttVersion +{ + V3_1_1("v3.1.1", 4), + V_5("v5", 5); + + private final String specification; + private final int protocol; + + MqttVersion( + String specification, + int protocol) + { + this.specification = specification; + this.protocol = protocol; + } + + public String specification() + { + return specification; + } + + public int protocol() + { + return protocol; + } + + static MqttVersion ofSpecification( + String specification) + { + MqttVersion version = null; + for (MqttVersion v : values()) + { + if (v.specification().equals(specification)) + { + version = v; + break; + } + } + return version; + } + + public static MqttVersion ofProtocol( + int protocol) + { + MqttVersion version = null; + for (MqttVersion v : values()) + { + if (v.protocol() == protocol) + { + version = v; + break; + } + } + return version; + } +} diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java index 201b86f7f9..69e6367e3c 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java @@ -1567,7 +1567,7 @@ private int onDecodeConnack( .session(sessionBuilder -> sessionBuilder .flags(flags) .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiry)) - .qosMax(maximumQos) + .subscribeQosMax(maximumQos) .packetSizeMax(maximumPacketSize) .capabilities(capabilities) .clientId(clientId)) @@ -1593,7 +1593,7 @@ private int onDecodeConnack( .session(sessionBuilder -> sessionBuilder .flags(flags) .expiry(sessionExpiry) - .qosMax(maximumQos) + .subscribeQosMax(maximumQos) .packetSizeMax(maximumPacketSize) 
.capabilities(capabilities) .clientId(clientId)) diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 6f88fdb965..1c681d82ca 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -79,23 +79,22 @@ import java.util.EnumMap; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.PrimitiveIterator; import java.util.UUID; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.IntConsumer; import java.util.function.IntSupplier; import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; import java.util.function.Supplier; -import java.util.function.ToLongFunction; import java.util.stream.Collectors; import org.agrona.DirectBuffer; @@ -103,18 +102,19 @@ import org.agrona.collections.Int2IntHashMap; import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.IntArrayList; +import org.agrona.collections.Long2LongHashMap; import org.agrona.collections.Long2ObjectHashMap; import org.agrona.collections.MutableBoolean; import org.agrona.collections.Object2IntHashMap; import org.agrona.concurrent.UnsafeBuffer; -import io.aklivity.zilla.runtime.binding.mqtt.config.MqttOptionsConfig; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttPatternConfig.MqttConnectProperty; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttValidator; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttBindingConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttRouteConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttVersion; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; @@ -191,16 +191,18 @@ import io.aklivity.zilla.runtime.engine.buffer.BufferPool; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.guard.GuardHandler; -import io.aklivity.zilla.runtime.engine.validator.Validator; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; public final class MqttServerFactory implements MqttStreamFactory { private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final String16FW MQTT_PROTOCOL_NAME = new String16FW("MQTT", BIG_ENDIAN); - private static final int MQTT_PROTOCOL_VERSION_5 = 5; - private static final int MQTT_PROTOCOL_VERSION_4 = 4; + public static 
final int MQTT_PROTOCOL_VERSION_5 = 5; + public static final int MQTT_PROTOCOL_VERSION_4 = 4; private static final int MAXIMUM_CLIENT_ID_LENGTH = 36; private static final int CONNECT_FIXED_HEADER = 0b0001_0000; private static final int SUBSCRIBE_FIXED_HEADER = 0b1000_0010; @@ -225,6 +227,7 @@ public final class MqttServerFactory implements MqttStreamFactory private static final int SUBSCRIPTION_IDS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SUBSCRIPTION_IDS.value(); private static final int SHARED_SUBSCRIPTIONS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SHARED_SUBSCRIPTIONS.value(); + private static final int REDIRECT_MASK = 1 << MqttServerCapabilities.REDIRECT.value(); private static final int WILL_FLAG_MASK = 0b0000_0100; private static final int CLEAN_START_FLAG_MASK = 0b0000_0010; private static final int WILL_QOS_MASK = 0b0001_1000; @@ -285,7 +288,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final FlushFW.Builder flushRW = new FlushFW.Builder(); private final MqttDataExFW mqttSubscribeDataExRO = new MqttDataExFW(); - private final MqttFlushExFW mqttSubscribeFlushExRO = new MqttFlushExFW(); + private final MqttFlushExFW mqttFlushExRO = new MqttFlushExFW(); private final MqttResetExFW mqttResetExRO = new MqttResetExFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); @@ -460,7 +463,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final LongSupplier supplyTraceId; private final LongSupplier supplyBudgetId; private final LongFunction supplyDebitor; - private final LongFunction supplyGuard; private final Long2ObjectHashMap bindings; private final int mqttTypeId; @@ -476,14 +478,11 @@ public final class MqttServerFactory implements MqttStreamFactory private final Supplier supplyClientId; private final MqttValidator validator; private final CharsetDecoder utf8Decoder; - private final ConcurrentMap unreleasedPacketIdsByClientId; - - private Map validators; + private final Function supplyValidator; public MqttServerFactory( MqttConfiguration config, - EngineContext context, - ConcurrentMap unreleasedPacketIdsByClientId) + EngineContext context) { this.writeBuffer = context.writeBuffer(); this.extBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); @@ -506,7 +505,6 @@ public MqttServerFactory( this.supplyReplyId = context::supplyReplyId; this.supplyBudgetId = context::supplyBudgetId; this.supplyTraceId = context::supplyTraceId; - this.supplyGuard = context::supplyGuard; this.context = context; this.bindings = new Long2ObjectHashMap<>(); this.mqttTypeId = context.supplyTypeId(MqttBinding.NAME); @@ -526,7 +524,7 @@ public MqttServerFactory( this.decodePacketTypeByVersion = new Int2ObjectHashMap<>(); this.decodePacketTypeByVersion.put(MQTT_PROTOCOL_VERSION_4, this::decodePacketTypeV4); this.decodePacketTypeByVersion.put(MQTT_PROTOCOL_VERSION_5, this::decodePacketTypeV5); - this.unreleasedPacketIdsByClientId = unreleasedPacketIdsByClientId; + this.supplyValidator = context::supplyValidator; } @Override @@ -564,22 +562,17 @@ public MessageConsumer newStream( { final long initialId = begin.streamId(); final long affinity = begin.affinity(); - final long replyId = supplyReplyId.applyAsLong(initialId); - final long budgetId = supplyBudgetId.getAsLong(); - this.validators = binding.topics; newStream = new MqttServer( - binding.credentials(), - binding.authField(), - binding.options, - binding.resolveId, sender, originId, routedId, initialId, - replyId, affinity, - budgetId)::onNetwork; + binding.versions, + 
binding.guard, + binding.credentials(), + binding.authField())::onNetwork; } return newStream; } @@ -895,7 +888,11 @@ private int decodeInitialType( final MqttConnectFW mqttConnect = mqttConnectRO.tryWrap(buffer, offset, limit); if (mqttConnect != null) { - final int reasonCode = decodeConnectProtocol(mqttConnect.protocolName(), mqttConnect.protocolVersion()); + int reasonCode = decodeConnectProtocol(mqttConnect.protocolName(), mqttConnect.protocolVersion()); + if (!server.versions.contains(MqttVersion.ofProtocol(mqttConnect.protocolVersion()))) + { + reasonCode = UNSUPPORTED_PROTOCOL_VERSION; + } if (reasonCode != SUCCESS) { server.onDecodeError(traceId, authorization, reasonCode, MQTT_PROTOCOL_VERSION_5); @@ -1250,7 +1247,7 @@ private int decodePublishV4( String16FW topicName; int publishLimit; - int packetId = -1; + int packetId = 0; if (qos > 0) { final MqttPublishQosV4FW publish = @@ -1354,7 +1351,7 @@ private int decodePublishV5( String16FW topicName; MqttPropertiesFW properties; int publishLimit; - int packetId = -1; + int packetId = 0; if (qos > 0) { final MqttPublishQosV5FW publish = @@ -1486,7 +1483,7 @@ private int decodePublishPayload( boolean canPublish = MqttState.initialOpened(publisher.state); final int maximum = publishablePayloadSize; - final int minimum = Math.min(maximum, 1024); + final int minimum = Math.min(maximum, Math.max(publisher.initialMin, 1024)); int valueClaimed = maximum; @@ -1507,7 +1504,7 @@ private int decodePublishPayload( server.onDecodePublishPayload(traceId, authorization, valueClaimed, server.decodedPacketId, server.decodedQos, server.decodedFlags, server.decodedExpiryInterval, server.decodedContentType, server.decodedPayloadFormat, server.decodedResponseTopic, server.decodedCorrelationData, server.decodedUserProperties, - payload, payload.offset(), payload.offset() + valueClaimed); + payload, payload.offset(), payload.offset() + valueClaimed, publisher.contentType); progress = payload.offset() + valueClaimed; @@ -2003,14 +2000,6 @@ private int decodePubcompV5( return progress; } - private boolean validContent( - String topic, - OctetsFW payload) - { - final Validator contentValidator = validators.get(topic); - return contentValidator == null || contentValidator.write(payload.value(), payload.offset(), payload.sizeof()); - } - private boolean invalidUtf8( OctetsFW payload) { @@ -2403,6 +2392,7 @@ private final class MqttServer private final GuardHandler guard; private final Function credentials; private final MqttConnectProperty authField; + private final List versions; private final OctetsFW.Builder correlationDataRW = new OctetsFW.Builder(); private final Array32FW.Builder userPropertiesRW = @@ -2446,7 +2436,7 @@ private final class MqttServer private long keepAliveTimeoutId = NO_CANCEL_ID; private long keepAliveTimeoutAt; - private int maximumQos; + private int subscribeQosMax; private int packetSizeMax; private int capabilities = RETAIN_AVAILABLE_MASK | SUBSCRIPTION_IDS_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK; private boolean serverDefinedKeepAlive = false; @@ -2466,8 +2456,8 @@ private final class MqttServer private int decodableRemainingBytes; private final Int2ObjectHashMap qos1Subscribes; private final Int2ObjectHashMap qos2Subscribes; - private final LinkedHashMap unAckedReceivedQos1PacketIds; - private final LinkedHashMap unAckedReceivedQos2PacketIds; + private final Long2LongHashMap unAckedReceivedQos1PacketIds; + private final Long2LongHashMap unAckedReceivedQos2PacketIds; private IntArrayList unreleasedPacketIds; @@ -2484,37 +2474,37 @@ 
private final class MqttServer private Array32FW decodedUserProperties = null; private MqttServer( - Function credentials, - MqttConnectProperty authField, - MqttOptionsConfig options, - ToLongFunction resolveId, MessageConsumer network, long originId, long routedId, long initialId, - long replyId, long affinity, - long budgetId) + List versions, + GuardHandler guard, + Function credentials, + MqttConnectProperty authField) { this.network = network; this.originId = originId; this.routedId = routedId; this.initialId = initialId; - this.replyId = replyId; - this.encodeBudgetId = budgetId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.versions = versions; + this.guard = guard; + this.credentials = credentials; + this.authField = authField; + this.encodeBudgetId = supplyBudgetId.getAsLong(); this.decoder = decodeInitialType; this.publishes = new Long2ObjectHashMap<>(); this.subscribes = new Long2ObjectHashMap<>(); this.topicAliases = new Int2ObjectHashMap<>(); this.subscribePacketIds = new Int2IntHashMap(-1); this.unsubscribePacketIds = new Object2IntHashMap<>(-1); - this.unAckedReceivedQos1PacketIds = new LinkedHashMap<>(); - this.unAckedReceivedQos2PacketIds = new LinkedHashMap<>(); + this.unAckedReceivedQos1PacketIds = new Long2LongHashMap(-1); + this.unAckedReceivedQos2PacketIds = new Long2LongHashMap(-1); this.qos1Subscribes = new Int2ObjectHashMap<>(); this.qos2Subscribes = new Int2ObjectHashMap<>(); - this.guard = resolveGuard(options, resolveId); - this.credentials = credentials; - this.authField = authField; + this.unreleasedPacketIds = new IntArrayList(); } private void onNetwork( @@ -2869,8 +2859,6 @@ private int onDecodeConnect( this.clientId = new String16FW(clientIdentifier.asString()); } - unreleasedPacketIds = unreleasedPacketIdsByClientId.computeIfAbsent(clientId.asString(), c -> new IntArrayList()); - this.keepAlive = (short) Math.min(Math.max(keepAlive, keepAliveMinimum), keepAliveMaximum); serverDefinedKeepAlive = this.keepAlive != keepAlive; keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); @@ -2941,13 +2929,18 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) this.session = new MqttSessionStream(originId, resolved.id, 0); + final int capabilities = versions.contains(MqttVersion.V_5) && versions.size() == 1 + ? 
REDIRECT_MASK : 0; + final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) .session(s -> s .flags(connectFlags & (CLEAN_START_FLAG_MASK | WILL_FLAG_MASK)) .expiry(sessionExpiry) - .clientId(clientId) - ); + .publishQosMax(MqttQoS.EXACTLY_ONCE.value()) + .capabilities(capabilities) + .clientId(clientId)); + session.doSessionBegin(traceId, affinity, builder.build()); if (willFlagSet) @@ -3023,7 +3016,7 @@ private int onDecodeConnectWillMessage( final int flags = connectFlags; final int willQos = decodeWillQos(flags); - if (willQos > maximumQos) + if (willQos > subscribeQosMax) { reasonCode = QOS_NOT_SUPPORTED; break decode; @@ -3157,7 +3150,8 @@ private MqttPublishStream resolvePublishStream( { final long resolvedId = resolved.id; - stream = publishes.computeIfAbsent(topicKey, s -> new MqttPublishStream(routedId, resolvedId, topic, qos)); + stream = publishes.computeIfAbsent(topicKey, s -> + new MqttPublishStream(routedId, resolvedId, topic, qos, binding.supplyModelConfig(topic))); stream.doPublishBegin(traceId, affinity, qos); } else @@ -3178,7 +3172,7 @@ private void onDecodePublish( { int reasonCode = SUCCESS; - if (qos > maximumQos) + if (qos > subscribeQosMax) { reasonCode = QOS_NOT_SUPPORTED; } @@ -3224,7 +3218,8 @@ private void onDecodePublishPayload( Array32FW userProperties, OctetsFW payload, int offset, - int limit) + int limit, + ValidatorHandler model) { int reasonCode = SUCCESS; @@ -3233,7 +3228,7 @@ private void onDecodePublishPayload( reasonCode = PAYLOAD_FORMAT_INVALID; } - if (validators != null && !validContent(mqttPublishHelper.topic, payload)) + if (model != null && !validContent(model, payload)) { reasonCode = PAYLOAD_FORMAT_INVALID; } @@ -3253,11 +3248,6 @@ private void onDecodePublishPayload( if (publishPayloadDeferred == 0) { - if (qos == 2) - { - unreleasedPacketIds.add(packetId); - } - publishPayloadDeferred = publishPayloadBytes - length; final Flyweight dataEx = mqttPublishDataExRW.wrap(dataExtBuffer, 0, dataExtBuffer.capacity()) .typeId(mqttTypeId) @@ -3266,6 +3256,7 @@ private void onDecodePublishPayload( p.deferred(publishPayloadDeferred) .qos(qos) .flags(flags) + .packetId(packetId) .expiryInterval(expiryInterval) .contentType(contentType) .format(f -> f.set(payloadFormat)) @@ -3341,16 +3332,7 @@ private void onDecodePubrel( int limit, int packetId) { - unreleasedPacketIds.removeInt(packetId); - switch (version) - { - case 4: - doEncodePubcompV4(traceId, authorization, packetId); - break; - case 5: - doEncodePubcompV5(traceId, authorization, packetId); - break; - } + session.doSessionFlush(traceId, 0, packetId); doSignalKeepAliveTimeout(traceId); } @@ -4508,10 +4490,10 @@ private void doEncodeConnackV5( propertiesSize = mqttProperty.limit(); } - if (0 <= maximumQos && maximumQos < 2) + if (0 <= subscribeQosMax && subscribeQosMax < 2) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .maximumQoS((byte) maximumQos) + .maximumQoS((byte) subscribeQosMax) .build(); propertiesSize = mqttProperty.limit(); } @@ -4980,6 +4962,14 @@ private int calculateSubscribeFlags( return flags; } + private boolean validContent( + ValidatorHandler contentType, + OctetsFW payload) + { + return contentType == null || + contentType.validate(payload.buffer(), payload.offset(), payload.sizeof(), ValueConsumer.NOP); + } + private final class Subscription { private int id = 0; @@ -5079,6 +5069,10 @@ private void onSession( final BeginFW begin = 
beginRO.wrap(buffer, index, index + length); onSessionBegin(begin); break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onSessionFlush(flush); + break; case DataFW.TYPE_ID: final DataFW data = dataRO.wrap(buffer, index, index + length); onSessionData(data); @@ -5208,15 +5202,54 @@ private void onSessionBegin( assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SESSION; final MqttSessionBeginExFW mqttSessionBeginEx = mqttBeginEx.session(); + final PrimitiveIterator.OfInt packetIds = mqttSessionBeginEx.packetIds(); sessionExpiry = mqttSessionBeginEx.expiry(); capabilities = mqttSessionBeginEx.capabilities(); - maximumQos = mqttSessionBeginEx.qosMax(); + subscribeQosMax = mqttSessionBeginEx.subscribeQosMax(); + if (packetIds != null) + { + packetIds.forEachRemaining((IntConsumer) p -> unreleasedPacketIds.add(p)); + } } doSessionWindow(traceId, encodeSlotOffset, encodeBudgetMax); } + private void onSessionFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + if (extension.sizeof() > 0) + { + final MqttFlushExFW sessionFlushEx = extension.get(mqttFlushExRO::tryWrap); + final int packetId = sessionFlushEx.session().packetId(); + + unreleasedPacketIds.removeInt(packetId); + switch (version) + { + case 4: + doEncodePubcompV4(traceId, authorization, packetId); + break; + case 5: + doEncodePubcompV5(traceId, authorization, packetId); + break; + } + } + } + private void onSessionData( DataFW data) { @@ -5390,6 +5423,23 @@ private void doSessionBegin( } } + private void doSessionFlush( + long traceId, + int reserved, + int packetId) + { + doFlush(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, sessionId, 0L, reserved, + ex -> ex.set((b, o, l) -> mqttFlushExRW.wrap(b, o, l) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder.packetId(packetId)) + .build() + .sizeof())); + + initialSeq += reserved; + assert initialSeq <= initialAck + initialMax; + } + private boolean hasSessionWindow( int length) { @@ -5563,6 +5613,7 @@ private class MqttPublishStream private final long routedId; private final long initialId; private final long replyId; + private final ValidatorHandler contentType; private long budgetId; private BudgetDebitor debitor; @@ -5571,6 +5622,7 @@ private class MqttPublishStream private long initialSeq; private long initialAck; private int initialMax; + private int initialMin; private int initialPad; private int decodablePayloadSize; @@ -5587,7 +5639,8 @@ private class MqttPublishStream long originId, long routedId, String topic, - int qos) + int qos, + ModelConfig config) { this.originId = originId; this.routedId = routedId; @@ -5595,6 +5648,7 @@ private class MqttPublishStream this.replyId = supplyReplyId.applyAsLong(initialId); this.topic = topic; this.topicKey = topicKey(topic, qos); + this.contentType = config != null ? 
supplyValidator.apply(config) : null; } private void doPublishBegin( @@ -5773,6 +5827,7 @@ private void onPublishWindow( final long sequence = window.sequence(); final long acknowledge = window.acknowledge(); final int maximum = window.maximum(); + final int minimum = window.minimum(); final long traceId = window.traceId(); final long authorization = window.authorization(); final long budgetId = window.budgetId(); @@ -5788,6 +5843,7 @@ private void onPublishWindow( initialAck = acknowledge; initialMax = maximum; + initialMin = minimum; initialPad = padding; assert initialAck <= initialSeq; @@ -5815,17 +5871,17 @@ private void acknowledgePublishPackets( long traceId, long authorization) { - for (Map.Entry e : unAckedReceivedQos1PacketIds.entrySet()) + for (Map.Entry e : unAckedReceivedQos1PacketIds.entrySet()) { if (e.getKey() <= acknowledge) { switch (version) { case 4: - doEncodePubackV4(traceId, authorization, e.getValue()); + doEncodePubackV4(traceId, authorization, e.getValue().intValue()); break; case 5: - doEncodePubackV5(traceId, authorization, e.getValue()); + doEncodePubackV5(traceId, authorization, e.getValue().intValue()); break; } unAckedReceivedQos1PacketIds.remove(e.getKey()); @@ -5836,20 +5892,22 @@ private void acknowledgePublishPackets( } } - for (Map.Entry e : unAckedReceivedQos2PacketIds.entrySet()) + for (Map.Entry e : unAckedReceivedQos2PacketIds.entrySet()) { if (e.getKey() <= acknowledge) { + final int packetId = e.getValue().intValue(); switch (version) { case 4: - doEncodePubrecV4(traceId, authorization, e.getValue()); + doEncodePubrecV4(traceId, authorization, packetId); break; case 5: - doEncodePubrecV5(traceId, authorization, e.getValue()); + doEncodePubrecV5(traceId, authorization, packetId); break; } unAckedReceivedQos2PacketIds.remove(e.getKey()); + unreleasedPacketIds.add(packetId); } else { @@ -6386,7 +6444,7 @@ private void onSubscribeFlush( if (extension.sizeof() > 0) { - final MqttFlushExFW subscribeFlushEx = extension.get(mqttSubscribeFlushExRO::tryWrap); + final MqttFlushExFW subscribeFlushEx = extension.get(mqttFlushExRO::tryWrap); final int packetId = subscribeFlushEx.subscribe().packetId(); switch (version) @@ -7069,21 +7127,5 @@ private int calculatePublishApplicationFlags( return flags; } } - - private GuardHandler resolveGuard( - MqttOptionsConfig options, - ToLongFunction resolveId) - { - GuardHandler guard = null; - - if (options != null && - options.authorization != null) - { - long guardId = resolveId.applyAsLong(options.authorization.name); - guard = supplyGuard.apply(guardId); - } - - return guard; - } } diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java index 0c40255b78..ff791dfe37 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java @@ -38,7 +38,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.config.MqttOptionsConfig; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttPatternConfig; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttTopicConfig; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; +import 
io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; public class MqttOptionsConfigAdapterTest { @@ -57,6 +57,11 @@ public void shouldReadOptions() { String text = "{" + + "\"versions\":" + + "[" + + "\"v3.1.1\"," + + "\"v5\"" + + "]," + "\"authorization\":" + "{" + "\"test0\":" + "{" + "\"credentials\":" + "{" + "\"connect\":" + "{" + "\"username\":\"Bearer {credentials}\"" + "}" + "}" + "}" + "}," + "\"topics\":" + "[" + "{" + "\"name\":\"sensor/one\"," + "\"content\":\"test\"" + "}" + "]" + "}"; MqttOptionsConfig options = jsonb.fromJson(text, MqttOptionsConfig.class); @@ -95,15 +100,23 @@ public void shouldReadOptions() MqttTopicConfig topic = options.topics.get(0); assertThat(topic.name, equalTo("sensor/one")); - assertThat(topic.content, instanceOf(TestValidatorConfig.class)); - assertThat(topic.content.type, equalTo("test")); + assertThat(topic.content, instanceOf(TestModelConfig.class)); + assertThat(topic.content.model, equalTo("test")); + assertThat(options.versions.get(0), equalTo(MqttVersion.V3_1_1)); + assertThat(options.versions.get(1), equalTo(MqttVersion.V_5)); } @Test public void shouldWriteOptions() { List topics = new ArrayList<>(); - topics.add(new MqttTopicConfig("sensor/one", new TestValidatorConfig())); + topics.add(new MqttTopicConfig("sensor/one", + TestModelConfig.builder() + .length(0) + .build())); + List versions = new ArrayList<>(); + versions.add(MqttVersion.V3_1_1); + versions.add(MqttVersion.V_5); MqttOptionsConfig options = new MqttOptionsConfig( new MqttAuthorizationConfig( "test0", new MqttCredentialsConfig( singletonList(new MqttPatternConfig( MqttPatternConfig.MqttConnectProperty.USERNAME, "Bearer {credentials}")))), - topics); + topics, versions); String text = jsonb.toJson(options); @@ -138,6 +151,11 @@ public void shouldWriteOptions() "\"name\":\"sensor/one\"," + "\"content\":\"test\"" + "}" + + "]," + + "\"versions\":" + + "[" + + "\"v3.1.1\"," + + "\"v5\"" + "]" + "}")); } diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/ConnectionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/ConnectionIT.java index 3ee00d0f5c..09d0a4965e 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/ConnectionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/ConnectionIT.java @@ -154,7 +154,6 @@ public void shouldDisconnectAfterSubscribeAndPublish() throws Exception k3po.finish(); } - @Test @Configuration("server.yaml") @Specification({ @@ -164,6 +163,15 @@ public void shouldRejectInvalidProtocolVersion() throws Exception k3po.finish(); } + @Test + @Configuration("server.protocol.version.yaml") + @Specification({ + "${net}/connect.unsupported.protocol.version/client"}) + public void shouldRejectUnsupportedProtocolVersion() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/PublishIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/PublishIT.java index 11cc3d34d8..629bd42105 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/PublishIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/PublishIT.java @@ -423,6 +423,16 @@ public void shouldPublishQoS2MessageAckWithReasoncode() throws Exception k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/publish.qos2.recovery/client", + "${app}/publish.qos2.recovery/server"}) +
public void shouldReleaseQos2PacketIdDuringRecovery() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java index bf2127735b..640d6bc3d7 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java @@ -246,6 +246,16 @@ public void shouldRedirectBeforeConnack() throws Exception k3po.finish(); } + @Test + @Configuration("server.protocol.version.yaml") + @Specification({ + "${net}/connect.successful/client", + "${app}/session.connect.redirect.support/server"}) + public void shouldConnectSupportSharding() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.route.non.default.yaml") @Specification({ diff --git a/runtime/binding-proxy/pom.xml b/runtime/binding-proxy/pom.xml index 29aa6c4912..12ebd0b670 100644 --- a/runtime/binding-proxy/pom.xml +++ b/runtime/binding-proxy/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-sse-kafka/pom.xml b/runtime/binding-sse-kafka/pom.xml index 407c0e5f61..6b42b605ad 100644 --- a/runtime/binding-sse-kafka/pom.xml +++ b/runtime/binding-sse-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-sse/pom.xml b/runtime/binding-sse/pom.xml index 5488a01250..81b9bf8ebd 100644 --- a/runtime/binding-sse/pom.xml +++ b/runtime/binding-sse/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-tcp/pom.xml b/runtime/binding-tcp/pom.xml index 6ffc302ac5..4869a2dbce 100644 --- a/runtime/binding-tcp/pom.xml +++ b/runtime/binding-tcp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-tls/pom.xml b/runtime/binding-tls/pom.xml index bbf954ac62..8d300e1772 100644 --- a/runtime/binding-tls/pom.xml +++ b/runtime/binding-tls/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/TlsConfiguration.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/TlsConfiguration.java index 9ef67e8fb7..c88108b9de 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/TlsConfiguration.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/TlsConfiguration.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_VERBOSE; + import java.security.KeyStore; import io.aklivity.zilla.runtime.engine.Configuration; @@ -30,6 +32,7 @@ public class TlsConfiguration extends Configuration public static final BooleanPropertyDef TLS_IGNORE_EMPTY_VAULT_REFS; public static final LongPropertyDef TLS_AWAIT_SYNC_CLOSE_MILLIS; public static final BooleanPropertyDef TLS_PROACTIVE_CLIENT_REPLY_BEGIN; + public static final BooleanPropertyDef TLS_VERBOSE; private static final ConfigurationDef TLS_CONFIG; @@ -45,6 +48,7 @@ public class TlsConfiguration extends Configuration 
TLS_IGNORE_EMPTY_VAULT_REFS = config.property("ignore.empty.vault.refs", false); TLS_AWAIT_SYNC_CLOSE_MILLIS = config.property("await.sync.close.millis", 3000L); TLS_PROACTIVE_CLIENT_REPLY_BEGIN = config.property("proactive.client.reply.begin", false); + TLS_VERBOSE = config.property("verbose", TlsConfiguration::verboseDefault); TLS_CONFIG = config; } @@ -99,6 +103,11 @@ public boolean proactiveClientReplyBegin() return TLS_PROACTIVE_CLIENT_REPLY_BEGIN.get(this); } + public boolean verbose() + { + return TLS_VERBOSE.getAsBoolean(this); + } + private static String cacertsStoreTypeDefault( Configuration config) { @@ -110,4 +119,10 @@ private static String cacertsStoreDefault( { return System.getProperty("javax.net.ssl.trustStore"); } + + private static boolean verboseDefault( + Configuration config) + { + return ENGINE_VERBOSE.getAsBoolean(config); + } } diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java index 9dc93986af..8272116134 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java @@ -20,6 +20,7 @@ import static io.aklivity.zilla.runtime.binding.tls.internal.types.ProxyInfoType.AUTHORITY; import static io.aklivity.zilla.runtime.binding.tls.internal.types.ProxyInfoType.SECURE; import static io.aklivity.zilla.runtime.binding.tls.internal.types.ProxySecureInfoType.NAME; +import static java.lang.System.currentTimeMillis; import static java.util.Collections.singletonList; import static java.util.stream.Collectors.toList; import static javax.net.ssl.StandardConstants.SNI_HOST_NAME; @@ -69,7 +70,7 @@ public final class TlsBindingConfig public final long id; public final long vaultId; - public final String name; + public final String qname; public final TlsOptionsConfig options; public final KindConfig kind; public final List routes; @@ -81,7 +82,7 @@ public TlsBindingConfig( { this.id = binding.id; this.vaultId = binding.vaultId; - this.name = binding.name; + this.qname = binding.qname; this.kind = binding.kind; this.options = binding.options != null ? 
TlsOptionsConfig.class.cast(binding.options) : OPTIONS_DEFAULT; this.routes = binding.routes.stream().map(TlsRouteConfig::new).collect(toList()); @@ -439,6 +440,16 @@ private KeyStore newKeys( for (String keyName : keyNames) { KeyStore.PrivateKeyEntry entry = vault.key(keyName); + if (entry == null) + { + if (config.verbose()) + { + System.out.printf("%d [%s] key \"%s\" not found\n", + currentTimeMillis(), this.qname, keyName); + } + continue; + } + KeyStore.ProtectionParameter protection = new KeyStore.PasswordProtection(password); store.setEntry(keyName, entry, protection); } diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsClientFactory.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsClientFactory.java index 75b6e7d1b3..fd022f46d0 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsClientFactory.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsClientFactory.java @@ -627,6 +627,10 @@ else if (TlsState.replyClosed(client.state)) } } } + else if (client.handshakeTimeoutFutureId != NO_CANCEL_ID) + { + client.decoder = decodeHandshakeFinished; + } return progress; } @@ -1151,15 +1155,20 @@ private void doAppData( private void doAppEnd( long traceId) { - state = TlsState.closeReply(state); - client.stream = nullIfClosed(state, client.stream); - doEnd(app, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, client.replyAuth, EMPTY_EXTENSION); + if (TlsState.replyOpening(state) && + !TlsState.replyClosed(state)) + { + state = TlsState.closeReply(state); + client.stream = nullIfClosed(state, client.stream); + doEnd(app, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, client.replyAuth, EMPTY_EXTENSION); + } } private void doAppAbort( long traceId) { - if (TlsState.replyOpening(state) && !TlsState.replyClosed(state)) + if (TlsState.replyOpening(state) && + !TlsState.replyClosed(state)) { state = TlsState.closeReply(state); client.stream = nullIfClosed(state, client.stream); @@ -1181,7 +1190,7 @@ private void doAppFlush( private void doAppReset( long traceId) { - if (TlsState.initialOpening(state) && !TlsState.initialClosed(state)) + if (!TlsState.initialClosed(state)) { state = TlsState.closeInitial(state); client.stream = nullIfClosed(state, client.stream); @@ -2103,6 +2112,6 @@ private static Optional nullIfClosed( int state, Optional stream) { - return TlsState.initialClosed(state) && TlsState.replyClosed(state) ? NULL_STREAM : stream; + return TlsState.closed(state) ? 
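
The null-entry guard added to newKeys above means a vault can report a missing key without crashing the binding: the key is skipped, and optionally logged when verbose is enabled. A standalone sketch of the same pattern using only JDK KeyStore APIs; all names here are illustrative, not zilla APIs:

import java.security.KeyStore;
import java.security.KeyStoreException;
import java.util.List;
import java.util.function.Function;

public final class NullableKeyLoadSketch
{
    // Load only the keys the vault can actually supply; a null entry is
    // skipped (and optionally reported) instead of failing the whole store.
    public static void load(
        KeyStore store,
        List<String> keyNames,
        Function<String, KeyStore.PrivateKeyEntry> vault,
        char[] password,
        boolean verbose) throws KeyStoreException
    {
        KeyStore.ProtectionParameter protection = new KeyStore.PasswordProtection(password);
        for (String keyName : keyNames)
        {
            KeyStore.PrivateKeyEntry entry = vault.apply(keyName);
            if (entry == null)
            {
                if (verbose)
                {
                    System.out.printf("key \"%s\" not found%n", keyName);
                }
                continue;
            }
            store.setEntry(keyName, entry, protection);
        }
    }
}
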
NULL_STREAM : stream; } } diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsServerFactory.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsServerFactory.java index faf7445a4d..f78ccc88fb 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsServerFactory.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsServerFactory.java @@ -656,6 +656,10 @@ else if (TlsState.initialClosed(server.state)) } } } + else if (server.handshakeTimeoutFutureId != NO_CANCEL_ID) + { + server.decoder = decodeHandshakeFinished; + } return progress; } @@ -2139,7 +2143,8 @@ private void doAppData( private void doAppEnd( long traceId) { - if (TlsState.initialOpened(state)) + if (TlsState.initialOpening(state) && + !TlsState.initialClosing(state)) { state = TlsState.closeInitial(state); stream = nullIfClosed(state, stream); @@ -2280,7 +2285,7 @@ private static Optional nullIfClosed( int state, Optional stream) { - return TlsState.initialClosed(state) && TlsState.replyClosed(state) ? NULL_STREAM : stream; + return TlsState.closed(state) ? NULL_STREAM : stream; } private String getCommonName( diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsState.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsState.java index 8fced5ad08..6233cc3d94 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsState.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/stream/TlsState.java @@ -122,6 +122,12 @@ static boolean replyClosed( return (state & REPLY_CLOSED) != 0; } + static boolean closed( + int state) + { + return initialClosed(state) && replyClosed(state); + } + private TlsState() { // utility diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/BridgeIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/BridgeIT.java new file mode 100644 index 0000000000..e8ebe71339 --- /dev/null +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/BridgeIT.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.tls.internal.streams; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class BridgeIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("bridge", "io/aklivity/zilla/specs/binding/tls/streams/bridge"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .countersBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("bridge.tls1.2.yaml") + @Specification({ + "${bridge}/handshake/client", + "${bridge}/handshake/server"}) + public void shouldHandshakeWithTls12() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("bridge.tls1.3.yaml") + @Specification({ + "${bridge}/handshake/client", + "${bridge}/handshake/server"}) + public void shouldHandshakeWithTls13() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java index 19c167ff90..f36f40709c 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java @@ -94,6 +94,13 @@ public void shouldNotNegotiateWithAlpnAsProtocolMismatch() throws Exception k3po.finish(); } + @Test + @Configuration("server.keys.not.found.yaml") + public void shouldIgnoreKeysNotFound() throws Exception + { + } + + @Ignore("https://github.com/k3po/k3po/issues/454 - Support connect aborted") @Test @Configuration("server.yaml") diff --git a/runtime/binding-ws/pom.xml b/runtime/binding-ws/pom.xml index 80ad7afc72..d09acc1d69 100644 --- a/runtime/binding-ws/pom.xml +++ b/runtime/binding-ws/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/command-metrics/pom.xml b/runtime/command-metrics/pom.xml index 23fb2f2dbd..2f087d7858 100644 --- a/runtime/command-metrics/pom.xml +++ b/runtime/command-metrics/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/command-start/pom.xml b/runtime/command-start/pom.xml index 4487d643e0..f278c99006 100644 --- a/runtime/command-start/pom.xml +++ b/runtime/command-start/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/command-stop/pom.xml b/runtime/command-stop/pom.xml index 908297f007..68ff1075d4 100644 --- a/runtime/command-stop/pom.xml +++ b/runtime/command-stop/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/command/pom.xml b/runtime/command/pom.xml index f2f0525cc8..5adeddbf59 100644 --- a/runtime/command/pom.xml +++ b/runtime/command/pom.xml @@ 
-8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/common/pom.xml b/runtime/common/pom.xml index 219435ee7c..498e0429a6 100644 --- a/runtime/common/pom.xml +++ b/runtime/common/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/engine/pom.xml b/runtime/engine/pom.xml index a422ff9dbf..f3f2d61d00 100644 --- a/runtime/engine/pom.xml +++ b/runtime/engine/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml @@ -215,7 +215,7 @@ io/aklivity/zilla/specs/engine/schema/guard/test.schema.patch.json, io/aklivity/zilla/specs/engine/schema/metrics/test.schema.patch.json, io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json, - io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json, + io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json, io/aklivity/zilla/specs/engine/schema/vault/test.schema.patch.json ${project.build.directory}/test-classes @@ -256,8 +256,8 @@ io/aklivity/zilla/runtime/engine/test/internal/exporter/**/*.class io/aklivity/zilla/runtime/engine/test/internal/guard/**/*.class io/aklivity/zilla/runtime/engine/test/internal/metrics/**/*.class + io/aklivity/zilla/runtime/engine/test/internal/model/**/*.class io/aklivity/zilla/runtime/engine/test/internal/resolver/**/*.class - io/aklivity/zilla/runtime/engine/test/internal/validator/**/*.class io/aklivity/zilla/runtime/engine/test/internal/vault/**/*.class io/aklivity/zilla/runtime/engine/internal/concurrent/bench/**/*.class org/openjdk/jmh/infra/generated/**/*.class diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Engine.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Engine.java index 911cf7037c..7a8f6935a8 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Engine.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Engine.java @@ -71,11 +71,10 @@ import io.aklivity.zilla.runtime.engine.internal.registry.FileWatcherTask; import io.aklivity.zilla.runtime.engine.internal.registry.HttpWatcherTask; import io.aklivity.zilla.runtime.engine.internal.registry.WatcherTask; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; import io.aklivity.zilla.runtime.engine.metrics.Collector; import io.aklivity.zilla.runtime.engine.metrics.MetricGroup; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactory; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.vault.Vault; public final class Engine implements Collector, AutoCloseable @@ -105,7 +104,7 @@ public final class Engine implements Collector, AutoCloseable Collection metricGroups, Collection vaults, Collection catalogs, - ValidatorFactory validatorFactory, + Collection models, ErrorHandler errorHandler, Collection affinities, boolean readonly) @@ -160,7 +159,7 @@ public final class Engine implements Collector, AutoCloseable { EngineWorker worker = new EngineWorker(config, tasks, labels, errorHandler, tuning::affinity, - bindings, exporters, guards, vaults, catalogs, metricGroups, validatorFactory, + bindings, exporters, guards, vaults, catalogs, models, metricGroups, this, coreIndex, readonly); workers.add(worker); } @@ -181,7 +180,7 @@ public final class Engine implements Collector, AutoCloseable 
schemaTypes.addAll(metricGroups.stream().map(MetricGroup::type).filter(Objects::nonNull).collect(toList())); schemaTypes.addAll(vaults.stream().map(Vault::type).filter(Objects::nonNull).collect(toList())); schemaTypes.addAll(catalogs.stream().map(Catalog::type).filter(Objects::nonNull).collect(toList())); - schemaTypes.addAll(validatorFactory.validatorSpis().stream().map(ValidatorFactorySpi::schema).collect(toList())); + schemaTypes.addAll(models.stream().map(Model::type).filter(Objects::nonNull).collect(toList())); final Map bindingsByType = bindings.stream() .collect(Collectors.toMap(b -> b.name(), b -> b)); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineBuilder.java index ad0a204e4a..ae989060c0 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineBuilder.java @@ -33,7 +33,8 @@ import io.aklivity.zilla.runtime.engine.guard.GuardFactory; import io.aklivity.zilla.runtime.engine.metrics.MetricGroup; import io.aklivity.zilla.runtime.engine.metrics.MetricGroupFactory; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactory; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; import io.aklivity.zilla.runtime.engine.vault.Vault; import io.aklivity.zilla.runtime.engine.vault.VaultFactory; @@ -130,11 +131,17 @@ public Engine build() catalogs.add(catalog); } - final ValidatorFactory validatorFactory = ValidatorFactory.instantiate(); + final Set models = new LinkedHashSet<>(); + final ModelFactory modelFactory = ModelFactory.instantiate(); + for (String name : modelFactory.names()) + { + Model model = modelFactory.create(name, config); + models.add(model); + } final ErrorHandler errorHandler = requireNonNull(this.errorHandler, "errorHandler"); return new Engine(config, bindings, exporters, guards, metricGroups, vaults, - catalogs, validatorFactory, errorHandler, affinities, readonly); + catalogs, models, errorHandler, affinities, readonly); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java index 960600c503..8ef1e2723a 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java @@ -38,6 +38,8 @@ public class EngineConfiguration extends Configuration { + public static final String ZILLA_NAME_PROPERTY = "zilla.name"; + public static final boolean DEBUG_BUDGETS = Boolean.getBoolean("zilla.engine.debug.budgets"); public static final PropertyDef ENGINE_CONFIG_URL; @@ -76,7 +78,7 @@ public class EngineConfiguration extends Configuration final ConfigurationDef config = new ConfigurationDef("zilla.engine"); ENGINE_CONFIG_URL = config.property(URL.class, "config.url", EngineConfiguration::configURL, "file:zilla.yaml"); ENGINE_CONFIG_POLL_INTERVAL_SECONDS = config.property("config.poll.interval.seconds", 60); - ENGINE_NAME = config.property("name", "engine"); + ENGINE_NAME = config.property("name", EngineConfiguration::defaultName); ENGINE_DIRECTORY = config.property("directory", "."); ENGINE_CACHE_DIRECTORY = config.property(Path.class, "cache.directory", EngineConfiguration::cacheDirectory, "cache"); ENGINE_HOST_RESOLVER = 
config.property(HostResolver.class, "host.resolver", @@ -312,6 +314,12 @@ InetAddress[] resolve( String name); } + private static String defaultName( + Configuration config) + { + return System.getProperty(ZILLA_NAME_PROPERTY, "zilla"); + } + private static HostResolver decodeHostResolver( Configuration config, String value) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineContext.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineContext.java index 0612ec1c15..2f113352a8 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineContext.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineContext.java @@ -19,7 +19,6 @@ import java.net.URL; import java.nio.channels.SelectableChannel; import java.util.function.LongSupplier; -import java.util.function.ToLongFunction; import org.agrona.MutableDirectBuffer; @@ -31,12 +30,13 @@ import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; import io.aklivity.zilla.runtime.engine.guard.GuardHandler; import io.aklivity.zilla.runtime.engine.metrics.Metric; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; import io.aklivity.zilla.runtime.engine.poller.PollerKey; -import io.aklivity.zilla.runtime.engine.validator.Validator; import io.aklivity.zilla.runtime.engine.vault.VaultHandler; public interface EngineContext @@ -128,16 +128,21 @@ VaultHandler supplyVault( CatalogHandler supplyCatalog( long catalogId); + ValidatorHandler supplyValidator( + ModelConfig config); + + ConverterHandler supplyReadConverter( + ModelConfig config); + + ConverterHandler supplyWriteConverter( + ModelConfig config); + URL resolvePath( String path); Metric resolveMetric( String name); - Validator createValidator( - ValidatorConfig validator, - ToLongFunction resolveId); - void onExporterAttached( long exporterId); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/catalog/CatalogHandler.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/catalog/CatalogHandler.java index ada46dc8cc..425509494f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/catalog/CatalogHandler.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/catalog/CatalogHandler.java @@ -15,10 +15,48 @@ */ package io.aklivity.zilla.runtime.engine.catalog; +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; + public interface CatalogHandler { int NO_SCHEMA_ID = 0; + @FunctionalInterface + interface Decoder + { + Decoder IDENTITY = (schemaId, data, index, length, next) -> + { + next.accept(data, index, length); + return length; + }; + + int accept( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next); + } + + @FunctionalInterface + interface Encoder + { + Encoder IDENTITY = (schemaId, data, index, length, next) -> + { + next.accept(data, index, length); + return length; + }; + + int accept( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next); + } + int register( String subject, String type, @@ -30,4 +68,38 @@ String 
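
On the EngineConfiguration change above: zilla.engine.name previously defaulted to the literal "engine"; it now falls back to the zilla.name system property and only then to "zilla". A hypothetical one-file equivalent of the new default chain:

public final class EngineNameSketch
{
    public static void main(String[] args)
    {
        // explicit zilla.engine.name wins, then -Dzilla.name, then "zilla"
        String name = System.getProperty("zilla.engine.name",
            System.getProperty("zilla.name", "zilla"));
        System.out.println(name);
    }
}
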
resolve( int resolve( String subject, String version); + + default int resolve( + DirectBuffer data, + int index, + int length) + { + return NO_SCHEMA_ID; + } + + default int decode( + DirectBuffer data, + int index, + int length, + ValueConsumer next, + Decoder decoder) + { + return decoder.accept(NO_SCHEMA_ID, data, index, length, next); + } + + default int encode( + int schemaId, + DirectBuffer data, + int index, + int length, + ValueConsumer next, + Encoder encoder) + { + return encoder.accept(schemaId, data, index, length, next); + } + + default int encodePadding() + { + return 0; + } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java index f344b8e7e8..b9e2132e18 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java @@ -40,6 +40,7 @@ public class BindingConfig public final String entry; public final String vault; public final OptionsConfig options; + public final List catalogs; public final List routes; public final TelemetryRefConfig telemetryRef; public final List composites; @@ -66,6 +67,7 @@ public static BindingConfigBuilder builder( .kind(binding.kind) .entry(binding.entry) .options(binding.options) + .catalogs(binding.catalogs) .routes(binding.routes) .telemetry(binding.telemetryRef) .composites(binding.composites); @@ -79,6 +81,7 @@ public static BindingConfigBuilder builder( String entry, String vault, OptionsConfig options, + List catalogs, List routes, TelemetryRefConfig telemetryRef, List namespaces) @@ -92,6 +95,7 @@ public static BindingConfigBuilder builder( this.vault = vault; this.options = options; this.routes = routes; + this.catalogs = catalogs; this.telemetryRef = telemetryRef; this.composites = namespaces; } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java index 716f9f508d..f0d97d4450 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java @@ -25,6 +25,7 @@ public final class BindingConfigBuilder extends ConfigBuilder> { public static final List ROUTES_DEFAULT = emptyList(); + public static final List CATALOGS_DEFAULT = emptyList(); public static final List COMPOSITES_DEFAULT = emptyList(); private final Function mapper; @@ -38,6 +39,7 @@ public final class BindingConfigBuilder extends ConfigBuilder routes; + private List catalogs; private TelemetryRefConfig telemetryRef; private List composites; @@ -116,6 +118,30 @@ public BindingConfigBuilder options( return this; } + public BindingConfigBuilder catalogs( + List catalogs) + { + this.catalogs = catalogs; + return this; + } + + public CatalogedConfigBuilder> catalog() + { + return new CatalogedConfigBuilder<>(this::catalog); + } + + public BindingConfigBuilder catalog( + CatalogedConfig catalog) + { + if (catalogs == null) + { + catalogs = new LinkedList<>(); + } + + catalogs.add(catalog); + return this; + } + public RouteConfigBuilder> route() { return new RouteConfigBuilder<>(this::route) @@ -196,6 +222,7 @@ public T build() entry, vault, options, + Optional.ofNullable(catalogs).orElse(CATALOGS_DEFAULT), 
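
The new CatalogHandler.Decoder and Encoder callbacks above let a catalog strip or add wire-format framing around a payload before the model sees it; the default implementations pass data through untouched. A sketch of a catalog-style decode in that shape, where the 5-byte prefix (magic byte plus big-endian schema id, as used by common schema registries) is an illustrative assumption, not something this diff specifies:

import java.nio.ByteOrder;

import org.agrona.DirectBuffer;

import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler;
import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;

public final class PrefixDecodeSketch
{
    // If a framed prefix is present, resolve the embedded schema id and hand
    // the unframed payload to the decoder; otherwise pass the bytes through.
    public static int decode(
        DirectBuffer data,
        int index,
        int length,
        ValueConsumer next,
        CatalogHandler.Decoder decoder)
    {
        int schemaId = CatalogHandler.NO_SCHEMA_ID;
        int offset = 0;
        if (length > 5 && data.getByte(index) == 0x00)
        {
            schemaId = data.getInt(index + 1, ByteOrder.BIG_ENDIAN);
            offset = 5;
        }
        return decoder.accept(schemaId, data, index + offset, length - offset, next);
    }
}
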
Optional.ofNullable(routes).orElse(ROUTES_DEFAULT), telemetryRef, Optional.ofNullable(composites).orElse(COMPOSITES_DEFAULT))); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfig.java new file mode 100644 index 0000000000..637e0d60df --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfig.java @@ -0,0 +1,38 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.List; + +public abstract class ModelConfig +{ + public final String model; + public final List cataloged; + + public ModelConfig( + String model) + { + this(model, null); + } + + public ModelConfig( + String model, + List cataloged) + { + this.model = model; + this.cataloged = cataloged; + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapter.java similarity index 74% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapter.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapter.java index 22705005c9..652d728051 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapter.java @@ -28,20 +28,20 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -public final class ValidatorConfigAdapter implements JsonbAdapter +public final class ModelConfigAdapter implements JsonbAdapter { - private static final String TYPE_NAME = "type"; + private static final String MODEL_NAME = "model"; - private final Map delegatesByName; - private ValidatorConfigAdapterSpi delegate; + private final Map delegatesByName; + private ModelConfigAdapterSpi delegate; - public ValidatorConfigAdapter() + public ModelConfigAdapter() { delegatesByName = ServiceLoader - .load(ValidatorConfigAdapterSpi.class) + .load(ModelConfigAdapterSpi.class) .stream() .map(Supplier::get) - .collect(toMap(ValidatorConfigAdapterSpi::type, identity())); + .collect(toMap(ModelConfigAdapterSpi::type, identity())); } public void adaptType( @@ -52,20 +52,20 @@ public void adaptType( @Override public JsonValue adaptToJson( - ValidatorConfig options) + ModelConfig options) { return delegate != null ? 
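
ModelConfig above is deliberately minimal: concrete models (avro, protobuf, json per the changelog) subclass it, carry their own options, and the model field acts as the discriminator that ModelConfigAdapter dispatches on. A hypothetical minimal subclass for illustration only:

import io.aklivity.zilla.runtime.engine.config.ModelConfig;

public final class ExampleModelConfig extends ModelConfig
{
    public ExampleModelConfig()
    {
        super("example"); // the "model" discriminator; no catalog references
    }
}
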
delegate.adaptToJson(options) : null; } @Override - public ValidatorConfig adaptFromJson( + public ModelConfig adaptFromJson( JsonValue value) { JsonObject object = null; if (value instanceof JsonString) { object = Json.createObjectBuilder() - .add(TYPE_NAME, ((JsonString) value).getString()) + .add(MODEL_NAME, ((JsonString) value).getString()) .build(); } else if (value instanceof JsonObject) @@ -77,8 +77,8 @@ else if (value instanceof JsonObject) assert false; } - String type = object.containsKey(TYPE_NAME) - ? object.getString(TYPE_NAME) + String type = object.containsKey(MODEL_NAME) + ? object.getString(MODEL_NAME) : null; adaptType(type); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapterSpi.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapterSpi.java similarity index 84% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapterSpi.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapterSpi.java index f7bf322a3e..320d529652 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ValidatorConfigAdapterSpi.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ModelConfigAdapterSpi.java @@ -18,16 +18,16 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -public interface ValidatorConfigAdapterSpi extends JsonbAdapter +public interface ModelConfigAdapterSpi extends JsonbAdapter { String type(); @Override JsonValue adaptToJson( - ValidatorConfig options); + ModelConfig options); @Override - ValidatorConfig adaptFromJson( + ModelConfig adaptFromJson( JsonValue object); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/OptionsConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/OptionsConfig.java index 9f63c5001c..2e83dcc8f6 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/OptionsConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/OptionsConfig.java @@ -15,6 +15,21 @@ */ package io.aklivity.zilla.runtime.engine.config; +import java.util.Collections; +import java.util.List; + public class OptionsConfig { + public final List models; + + public OptionsConfig() + { + this(Collections.emptyList()); + } + + public OptionsConfig( + List models) + { + this.models = models; + } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfig.java index 620e43b35c..d9c85a8bf8 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfig.java @@ -23,17 +23,20 @@ public class SchemaConfig public final String version; public final String subject; public final int id; + public final String record; SchemaConfig( String strategy, String subject, String version, - int id) + int id, + String record) { this.strategy = strategy; this.version = version; this.subject = subject; this.id = id; + this.record = record; } public static SchemaConfigBuilder builder( diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfigAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfigAdapter.java index 
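
Note how adaptFromJson above normalizes its input: a bare JSON string is rewrapped as an object keyed by "model" before delegating to the matching ModelConfigAdapterSpi, so the two spellings below are equivalent (the "example" type is hypothetical):

import jakarta.json.Json;
import jakarta.json.JsonValue;

public final class ModelShapesSketch
{
    public static void main(String[] args)
    {
        // shorthand form: "example"
        JsonValue shorthand = Json.createValue("example");

        // longhand form: { "model": "example" }, where additional keys
        // would carry model-specific options
        JsonValue longhand = Json.createObjectBuilder()
            .add("model", "example")
            .build();

        System.out.println(shorthand + " ~ " + longhand);
    }
}
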
1a16a922b3..7ff7c71efe 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfigAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/SchemaConfigAdapter.java @@ -27,6 +27,7 @@ public class SchemaConfigAdapter implements JsonbAdapter extends ConfigBuilder mapper) @@ -67,9 +68,16 @@ public SchemaConfigBuilder id( return this; } + public SchemaConfigBuilder record( + String record) + { + this.record = record; + return this; + } + @Override public T build() { - return mapper.apply(new SchemaConfig(strategy, subject, version, id)); + return mapper.apply(new SchemaConfig(strategy, subject, version, id, record)); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java index ecaf399ea0..6cc76d2de7 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java @@ -46,6 +46,7 @@ public class BindingConfigsAdapter implements JsonbAdapter { private static final String VAULT_NAME = "vault"; + private static final String CATALOG_NAME = "catalog"; private static final String EXIT_NAME = "exit"; private static final String TYPE_NAME = "type"; private static final String KIND_NAME = "kind"; @@ -57,6 +58,7 @@ public class BindingConfigsAdapter implements JsonbAdapter composites; @@ -69,6 +71,7 @@ public BindingConfigsAdapter( this.kind = new KindAdapter(context); this.route = new RouteAdapter(context); this.options = new OptionsAdapter(OptionsConfigAdapterSpi.Kind.BINDING, context); + this.cataloged = new CatalogedAdapter(); this.telemetryRef = new TelemetryRefAdapter(); this.composites = ServiceLoader @@ -117,6 +120,13 @@ public JsonObject adaptToJson( item.add(OPTIONS_NAME, options.adaptToJson(binding.options)); } + if (binding.catalogs != null && !binding.catalogs.isEmpty()) + { + JsonArrayBuilder catalogs = Json.createArrayBuilder(); + catalogs.add(cataloged.adaptToJson(binding.catalogs)); + item.add(CATALOG_NAME, catalogs); + } + if (!ROUTES_DEFAULT.equals(binding.routes)) { RouteConfig lastRoute = binding.routes.get(binding.routes.size() - 1); @@ -189,6 +199,11 @@ public BindingConfig[] adaptFromJson( binding.vault(item.getString(VAULT_NAME)); } + if (item.containsKey(CATALOG_NAME)) + { + binding.catalogs(cataloged.adaptFromJson(item.getJsonObject(CATALOG_NAME))); + } + if (item.containsKey(OPTIONS_NAME)) { binding.options(options.adaptFromJson(item.getJsonObject(OPTIONS_NAME))); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/CatalogedAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/CatalogedAdapter.java new file mode 100644 index 0000000000..ad7316e1c8 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/CatalogedAdapter.java @@ -0,0 +1,79 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.internal.config; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.json.Json; +import jakarta.json.JsonArray; +import jakarta.json.JsonArrayBuilder; +import jakarta.json.JsonObject; +import jakarta.json.JsonObjectBuilder; +import jakarta.json.JsonValue; +import jakarta.json.bind.adapter.JsonbAdapter; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; + +public class CatalogedAdapter implements JsonbAdapter, JsonObject> +{ + private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); + + public CatalogedAdapter() + { + } + + @Override + public JsonObject adaptToJson( + List catalogs) + { + JsonObjectBuilder catalogsBuilder = Json.createObjectBuilder(); + for (CatalogedConfig catalog : catalogs) + { + JsonArrayBuilder array = Json.createArrayBuilder(); + for (SchemaConfig schemaItem: catalog.schemas) + { + array.add(schema.adaptToJson(schemaItem)); + } + catalogsBuilder.add(catalog.name, array); + } + + return catalogsBuilder.build(); + } + + @Override + public List adaptFromJson( + JsonObject catalogsJson) + { + List catalogs = new ArrayList<>(); + for (String catalogName: catalogsJson.keySet()) + { + JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); + List schemas = new ArrayList<>(); + for (JsonValue item : schemasJson) + { + JsonObject schemaJson = (JsonObject) item; + SchemaConfig schemaElement = schema.adaptFromJson(schemaJson); + schemas.add(schemaElement); + } + catalogs.add(new CatalogedConfig(catalogName, schemas)); + } + + return catalogs; + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineManager.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineManager.java index 2d815b00b8..44918f788d 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineManager.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineManager.java @@ -38,6 +38,7 @@ import io.aklivity.zilla.runtime.engine.binding.Binding; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.CatalogConfig; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.ConfigException; import io.aklivity.zilla.runtime.engine.config.EngineConfig; @@ -48,6 +49,7 @@ import io.aklivity.zilla.runtime.engine.config.KindConfig; import io.aklivity.zilla.runtime.engine.config.MetricConfig; import io.aklivity.zilla.runtime.engine.config.MetricRefConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.config.TelemetryRefConfig; @@ -58,7 +60,7 @@ import 
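
Reading adaptFromJson above, the catalog section a binding accepts is a JSON object keyed by catalog name, each value an array of schema entries. A sketch of parsing that shape; catalog and schema values are illustrative, the schema keys assume SchemaConfigAdapter mirrors SchemaConfig's field names, and since the adapter lives in an internal package, direct use outside the engine is for illustration only:

import java.util.List;

import jakarta.json.Json;
import jakarta.json.JsonObject;

import io.aklivity.zilla.runtime.engine.config.CatalogedConfig;
import io.aklivity.zilla.runtime.engine.internal.config.CatalogedAdapter;

public final class CatalogSectionSketch
{
    public static void main(String[] args)
    {
        JsonObject catalogs = Json.createObjectBuilder()
            .add("my_catalog0", Json.createArrayBuilder()
                .add(Json.createObjectBuilder()
                    .add("strategy", "topic")
                    .add("version", "latest")))
            .build();

        List<CatalogedConfig> parsed = new CatalogedAdapter().adaptFromJson(catalogs);
        System.out.println(parsed.get(0).name + ": " + parsed.get(0).schemas.size() + " schema(s)");
    }
}
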
io.aklivity.zilla.runtime.engine.internal.Tuning; import io.aklivity.zilla.runtime.engine.internal.config.NamespaceAdapter; import io.aklivity.zilla.runtime.engine.internal.layouts.BindingsLayout; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.resolver.Resolver; public class EngineManager @@ -242,6 +244,28 @@ private void process( binding.vaultId = resolver.resolve(binding.vault); } + if (binding.catalogs != null) + { + for (CatalogedConfig cataloged : binding.catalogs) + { + cataloged.id = resolver.resolve(cataloged.name); + } + } + + if (binding.options != null) + { + for (ModelConfig model : binding.options.models) + { + if (model.cataloged != null) + { + for (CatalogedConfig cataloged : model.cataloged) + { + cataloged.id = resolver.resolve(cataloged.name); + } + } + } + } + for (RouteConfig route : binding.routes) { route.id = resolver.resolve(route.exit); @@ -322,6 +346,8 @@ private void register( register(namespace); } } + + extensions.forEach(e -> e.onRegistered(context)); } private void unregister( @@ -334,6 +360,8 @@ private void unregister( unregister(namespace); } } + + extensions.forEach(e -> e.onUnregistered(context)); } private void register( @@ -343,7 +371,6 @@ private void register( .map(d -> d.attach(namespace)) .reduce(CompletableFuture::allOf) .ifPresent(CompletableFuture::join); - extensions.forEach(e -> e.onRegistered(context)); } private void unregister( @@ -355,7 +382,6 @@ private void unregister( .map(d -> d.detach(namespace)) .reduce(CompletableFuture::allOf) .ifPresent(CompletableFuture::join); - extensions.forEach(e -> e.onUnregistered(context)); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineRegistry.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineRegistry.java index d5425348be..bc2695a7c9 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineRegistry.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineRegistry.java @@ -27,10 +27,10 @@ import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; import io.aklivity.zilla.runtime.engine.exporter.ExporterContext; import io.aklivity.zilla.runtime.engine.guard.GuardContext; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; import io.aklivity.zilla.runtime.engine.metrics.Collector; import io.aklivity.zilla.runtime.engine.metrics.Metric; import io.aklivity.zilla.runtime.engine.metrics.MetricContext; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.util.function.ObjectLongLongFunction; import io.aklivity.zilla.runtime.engine.vault.VaultContext; @@ -167,8 +167,8 @@ private void attachNamespace( NamespaceConfig namespace) { NamespaceRegistry registry = - new NamespaceRegistry(namespace, bindingsByType, guardsByType, vaultsByType, catalogsByType, metricsByName, - exportersByType, supplyLabelId, this::resolveMetric, exporterAttached, exporterDetached, + new NamespaceRegistry(namespace, bindingsByType, guardsByType, vaultsByType, catalogsByType, + metricsByName, exportersByType, supplyLabelId, this::resolveMetric, exporterAttached, exporterDetached, supplyMetricRecorder, detachBinding, collector); namespacesById.put(registry.namespaceId(), registry); registry.attach(); diff --git 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineWorker.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineWorker.java index 18f5fa564c..63be6936fb 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineWorker.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/EngineWorker.java @@ -60,7 +60,6 @@ import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; -import java.util.function.ToLongFunction; import org.agrona.DeadlineTimerWheel; import org.agrona.DeadlineTimerWheel.TimerHandler; @@ -95,8 +94,8 @@ import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; import io.aklivity.zilla.runtime.engine.exporter.Exporter; import io.aklivity.zilla.runtime.engine.exporter.ExporterContext; import io.aklivity.zilla.runtime.engine.exporter.ExporterHandler; @@ -113,7 +112,6 @@ import io.aklivity.zilla.runtime.engine.internal.layouts.metrics.HistogramsLayout; import io.aklivity.zilla.runtime.engine.internal.layouts.metrics.ScalarsLayout; import io.aklivity.zilla.runtime.engine.internal.poller.Poller; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; import io.aklivity.zilla.runtime.engine.internal.stream.StreamId; import io.aklivity.zilla.runtime.engine.internal.stream.Target; import io.aklivity.zilla.runtime.engine.internal.types.stream.AbortFW; @@ -130,10 +128,13 @@ import io.aklivity.zilla.runtime.engine.metrics.Metric; import io.aklivity.zilla.runtime.engine.metrics.MetricContext; import io.aklivity.zilla.runtime.engine.metrics.MetricGroup; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.poller.PollerKey; import io.aklivity.zilla.runtime.engine.util.function.LongLongFunction; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactory; import io.aklivity.zilla.runtime.engine.vault.Vault; import io.aklivity.zilla.runtime.engine.vault.VaultContext; import io.aklivity.zilla.runtime.engine.vault.VaultHandler; @@ -198,6 +199,7 @@ public class EngineWorker implements EngineContext, Agent private final ElektronSignaler signaler; private final Long2ObjectHashMap correlations; private final Long2ObjectHashMap exportersById; + private final Map modelsByType; private final EngineRegistry registry; private final Deque taskQueue; @@ -208,7 +210,6 @@ public class EngineWorker implements EngineContext, Agent private final ScalarsLayout countersLayout; private final ScalarsLayout gaugesLayout; private final HistogramsLayout histogramsLayout; - private final ValidatorFactory validatorFactory; private long initialId; private long promiseId; private long traceId; @@ -228,8 +229,8 @@ public EngineWorker( Collection guards, Collection vaults, Collection catalogs, + Collection models, Collection 
metricGroups, - ValidatorFactory validatorFactory, Collector collector, int index, boolean readonly) @@ -372,6 +373,14 @@ public EngineWorker( catalogsByType.put(type, catalog.supply(this)); } + Map modelsByType = new LinkedHashMap<>(); + for (Model model : models) + { + String type = model.name(); + modelsByType.put(type, model.supply(this)); + } + this.modelsByType = modelsByType; + Map metricsByName = new LinkedHashMap<>(); for (MetricGroup metricGroup : metricGroups) { @@ -392,12 +401,12 @@ public EngineWorker( bindingsByType::get, guardsByType::get, vaultsByType::get, catalogsByType::get, metricsByName::get, exportersByType::get, labels::supplyLabelId, this::onExporterAttached, this::onExporterDetached, this::supplyMetricWriter, this::detachStreams, collector); + this.taskQueue = new ConcurrentLinkedDeque<>(); this.correlations = new Long2ObjectHashMap<>(); this.idleStrategy = idleStrategy; this.errorHandler = errorHandler; this.exportersById = new Long2ObjectHashMap<>(); - this.validatorFactory = validatorFactory; } public static int indexOfId( @@ -654,6 +663,30 @@ public CatalogHandler supplyCatalog( return catalog != null ? catalog.handler() : null; } + @Override + public ValidatorHandler supplyValidator( + ModelConfig config) + { + ModelContext model = modelsByType.get(config.model); + return model != null ? model.supplyValidatorHandler(config) : null; + } + + @Override + public ConverterHandler supplyReadConverter( + ModelConfig config) + { + ModelContext model = modelsByType.get(config.model); + return model != null ? model.supplyReadConverterHandler(config) : null; + } + + @Override + public ConverterHandler supplyWriteConverter( + ModelConfig config) + { + ModelContext model = modelsByType.get(config.model); + return model != null ? model.supplyWriteConverterHandler(config) : null; + } + @Override public URL resolvePath( String path) @@ -862,14 +895,6 @@ public LongConsumer supplyHistogramWriter( return histogramsLayout.supplyWriter(bindingId, metricId); } - @Override - public Validator createValidator( - ValidatorConfig validator, - ToLongFunction resolveId) - { - return validatorFactory.create(validator, resolveId, this::supplyCatalog); - } - private void onSystemMessage( int msgTypeId, DirectBuffer buffer, diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/NamespaceRegistry.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/NamespaceRegistry.java index ce36736c1d..86bee85e32 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/NamespaceRegistry.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/NamespaceRegistry.java @@ -43,10 +43,10 @@ import io.aklivity.zilla.runtime.engine.exporter.ExporterContext; import io.aklivity.zilla.runtime.engine.exporter.ExporterHandler; import io.aklivity.zilla.runtime.engine.guard.GuardContext; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; import io.aklivity.zilla.runtime.engine.metrics.Collector; import io.aklivity.zilla.runtime.engine.metrics.Metric; import io.aklivity.zilla.runtime.engine.metrics.MetricContext; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.util.function.ObjectLongLongFunction; import io.aklivity.zilla.runtime.engine.vault.VaultContext; diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecord.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecord.java index 902297fd14..4f919490d6 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecord.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecord.java @@ -17,7 +17,7 @@ import static io.aklivity.zilla.runtime.engine.internal.layouts.metrics.HistogramsLayout.BUCKETS; import static io.aklivity.zilla.runtime.engine.internal.layouts.metrics.HistogramsLayout.BUCKET_LIMITS; -import static io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId.namespaceId; +import static io.aklivity.zilla.runtime.engine.namespace.NamespacedId.namespaceId; import java.util.Objects; import java.util.function.LongFunction; diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecord.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecord.java index b94a8d184d..ef87f5af01 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecord.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecord.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.engine.metrics.reader; -import static io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId.namespaceId; +import static io.aklivity.zilla.runtime.engine.namespace.NamespacedId.namespaceId; import java.util.Objects; import java.util.function.LongFunction; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidator.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ConverterHandler.java similarity index 61% rename from runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidator.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ConverterHandler.java index c79f9e871e..dccd29248c 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidator.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ConverterHandler.java @@ -13,37 +13,31 @@ * License for the specific language governing permissions and limitations * under the License. 
 */
-package io.aklivity.zilla.runtime.engine.test.internal.validator;
+package io.aklivity.zilla.runtime.engine.model;
 
 import org.agrona.DirectBuffer;
 
-import io.aklivity.zilla.runtime.engine.validator.Validator;
+import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;
 
-public class TestValidator implements Validator
+public interface ConverterHandler
 {
-    @Override
-    public boolean read(
-        DirectBuffer data,
-        int index,
-        int length)
+    ConverterHandler NONE = (data, index, length, next) ->
     {
-        return validate(data, index, length);
-    }
+        next.accept(data, index, length);
+        return length;
+    };
 
-    @Override
-    public boolean write(
+    int convert(
         DirectBuffer data,
         int index,
-        int length)
-    {
-        return validate(data, index, length);
-    }
+        int length,
+        ValueConsumer next);
 
-    private boolean validate(
+    default int padding(
         DirectBuffer data,
         int index,
         int length)
     {
-        return length == 13;
+        return 0;
     }
 }
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaBrokerInfo.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/Model.java
similarity index 65%
rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaBrokerInfo.java
rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/Model.java
index d4b9b1c4a0..d193d4b412 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaBrokerInfo.java
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/Model.java
@@ -13,21 +13,18 @@
  * License for the specific language governing permissions and limitations
  * under the License.
  */
-package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+package io.aklivity.zilla.runtime.engine.model;
 
-final class KafkaBrokerInfo
+import java.net.URL;
+
+import io.aklivity.zilla.runtime.engine.EngineContext;
+
+public interface Model
 {
-    final int brokerId;
-    final String host;
-    final int port;
+    String name();
+
+    ModelContext supply(
+        EngineContext context);
 
-    KafkaBrokerInfo(
-        int brokerId,
-        String host,
-        int port)
-    {
-        this.brokerId = brokerId;
-        this.host = host;
-        this.port = port;
-    }
+    URL type();
 }
diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelContext.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelContext.java
new file mode 100644
index 0000000000..14609ced0c
--- /dev/null
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelContext.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
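
With ConverterHandler defined above, a call site converts by handing a payload and a ValueConsumer that receives the converted bytes; the return value is the converted length. A hypothetical pass-through example using the NONE converter (in the engine, a model-specific handler would come from EngineContext.supplyReadConverter):

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.ConverterHandler;

public final class ConvertCallSketch
{
    public static void main(String[] args)
    {
        DirectBuffer payload = new UnsafeBuffer("{\"greeting\":\"hello\"}".getBytes());

        ConverterHandler converter = ConverterHandler.NONE; // pass-through

        int converted = converter.convert(payload, 0, payload.capacity(),
            (buf, index, length) -> System.out.println("converted " + length + " bytes"));

        System.out.println(converted); // 20: NONE reports the input length
    }
}
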
+ */
+package io.aklivity.zilla.runtime.engine.model;
+
+import io.aklivity.zilla.runtime.engine.config.ModelConfig;
+
+public interface ModelContext
+{
+    ConverterHandler supplyReadConverterHandler(
+        ModelConfig config);
+
+    ConverterHandler supplyWriteConverterHandler(
+        ModelConfig config);
+
+    default ValidatorHandler supplyValidatorHandler(
+        ModelConfig config)
+    {
+        return null;
+    }
+}
diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactory.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactory.java
new file mode 100644
index 0000000000..c4d4a400e5
--- /dev/null
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactory.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.engine.model;
+
+import static java.util.Collections.unmodifiableMap;
+import static java.util.Objects.requireNonNull;
+import static java.util.ServiceLoader.load;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.ServiceLoader;
+import java.util.TreeMap;
+
+import io.aklivity.zilla.runtime.engine.Configuration;
+
+public final class ModelFactory
+{
+    private final Map<String, ModelFactorySpi> modelSpis;
+
+    public static ModelFactory instantiate()
+    {
+        return instantiate(load(ModelFactorySpi.class));
+    }
+
+    public Iterable<String> names()
+    {
+        return modelSpis.keySet();
+    }
+
+    public Model create(
+        String name,
+        Configuration config)
+    {
+        requireNonNull(name, "name");
+
+        ModelFactorySpi converterSpi = requireNonNull(modelSpis.get(name), () -> "Unrecognized Model name: " + name);
+
+        return converterSpi.create(config);
+    }
+
+    public Collection<ModelFactorySpi> converterSpis()
+    {
+        return modelSpis.values();
+    }
+
+    private static ModelFactory instantiate(
+        ServiceLoader<ModelFactorySpi> converters)
+    {
+        Map<String, ModelFactorySpi> converterSpisByName = new TreeMap<>();
+        converters.forEach(converterSpi -> converterSpisByName.put(converterSpi.type(), converterSpi));
+
+        return new ModelFactory(unmodifiableMap(converterSpisByName));
+    }
+
+    private ModelFactory(
+        Map<String, ModelFactorySpi> modelSpis)
+    {
+        this.modelSpis = modelSpis;
+    }
+}
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicType.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactorySpi.java
similarity index 63%
rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicType.java
rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactorySpi.java
index 57685c85c7..8929f90f86 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicType.java
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ModelFactorySpi.java
@@ -13,20 +13,19 @@
  * License for the specific language governing permissions and limitations
  * under the License.
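
A sketch of the discovery flow EngineBuilder now performs with this factory: every ModelFactorySpi on the class or module path is loaded, then each model is instantiated by name. It assumes Configuration has a public no-argument constructor, which this diff does not show:

import io.aklivity.zilla.runtime.engine.Configuration;
import io.aklivity.zilla.runtime.engine.model.Model;
import io.aklivity.zilla.runtime.engine.model.ModelFactory;

public final class ModelDiscoverySketch
{
    public static void main(String[] args)
    {
        Configuration config = new Configuration();
        ModelFactory factory = ModelFactory.instantiate();
        for (String name : factory.names())
        {
            Model model = factory.create(name, config);
            System.out.println(name + " -> " + model.getClass().getName());
        }
    }
}
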
 */
-package io.aklivity.zilla.runtime.binding.kafka.internal.config;
+package io.aklivity.zilla.runtime.engine.model;
 
-import io.aklivity.zilla.runtime.engine.validator.Validator;
+import java.net.URL;
 
-public class KafkaTopicType
+import io.aklivity.zilla.runtime.engine.Configuration;
+import io.aklivity.zilla.runtime.engine.factory.FactorySpi;
+
+public interface ModelFactorySpi extends FactorySpi
 {
-    public final Validator key;
-    public final Validator value;
+    String type();
+
+    URL schema();
 
-    public KafkaTopicType(
-        Validator key,
-        Validator value)
-    {
-        this.key = key;
-        this.value = value;
-    }
+    Model create(
+        Configuration config);
 }
diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ValidatorHandler.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ValidatorHandler.java
new file mode 100644
index 0000000000..6e979b4abf
--- /dev/null
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/ValidatorHandler.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.engine.model;
+
+import org.agrona.DirectBuffer;
+
+import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;
+
+public interface ValidatorHandler
+{
+    int FLAGS_COMPLETE = 0x03;
+    int FLAGS_INIT = 0x02;
+    int FLAGS_FIN = 0x01;
+
+    boolean validate(
+        int flags,
+        DirectBuffer data,
+        int index,
+        int length,
+        ValueConsumer next);
+
+    default boolean validate(
+        DirectBuffer data,
+        int index,
+        int length,
+        ValueConsumer next)
+    {
+        return validate(FLAGS_COMPLETE, data, index, length, next);
+    }
+}
diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/Validator.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumer.java
similarity index 77%
rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/Validator.java
rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumer.java
index 0f9b2cbe64..f0076fc2d0 100644
--- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/Validator.java
+++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumer.java
@@ -13,19 +13,17 @@
  * License for the specific language governing permissions and limitations
  * under the License.
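
The flags above are what make validation incremental for fragmented client messages: FLAGS_INIT opens a fragmented sequence, intermediate fragments carry no flags, FLAGS_FIN closes it, and the default method covers the single-fragment case with FLAGS_COMPLETE. A sketch using an accept-all stub handler, purely illustrative:

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.ValidatorHandler;
import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;

public final class FragmentedValidationSketch
{
    public static void main(String[] args)
    {
        // stub handler; a real model would buffer fragments until FIN
        ValidatorHandler handler = (flags, data, index, length, next) -> true;

        DirectBuffer part1 = new UnsafeBuffer("{\"gre".getBytes());
        DirectBuffer part2 = new UnsafeBuffer("eting\":\"hel".getBytes());
        DirectBuffer part3 = new UnsafeBuffer("lo\"}".getBytes());

        boolean valid =
            handler.validate(ValidatorHandler.FLAGS_INIT, part1, 0, part1.capacity(), ValueConsumer.NOP) &&
            handler.validate(0x00, part2, 0, part2.capacity(), ValueConsumer.NOP) &&
            handler.validate(ValidatorHandler.FLAGS_FIN, part3, 0, part3.capacity(), ValueConsumer.NOP);

        System.out.println(valid); // true with the stub
    }
}
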
*/ -package io.aklivity.zilla.runtime.engine.validator; +package io.aklivity.zilla.runtime.engine.model.function; import org.agrona.DirectBuffer; -public interface Validator +@FunctionalInterface +public interface ValueConsumer { - boolean read( - DirectBuffer data, - int index, - int length); + ValueConsumer NOP = (buffer, index, length) -> {}; - boolean write( - DirectBuffer data, + void accept( + DirectBuffer buffer, int index, int length); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/stream/NamespacedId.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/namespace/NamespacedId.java similarity index 95% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/stream/NamespacedId.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/namespace/NamespacedId.java index d3439a92d2..7d6b742873 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/stream/NamespacedId.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/namespace/NamespacedId.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.internal.stream; +package io.aklivity.zilla.runtime.engine.namespace; public final class NamespacedId { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactory.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactory.java deleted file mode 100644 index 46816e8d0b..0000000000 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactory.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc. - * - * Aklivity licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
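ValueConsumer is the zero-copy callback that replaces the old read/write Validator shape: it observes a DirectBuffer slice without owning it, and the NOP constant discards the value. A sketch of a consumer that copies the observed slice out of the buffer (buffer contents and sizes are illustrative):

```java
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;

public final class ValueConsumerUsage
{
    public static void main(String[] args)
    {
        byte[] copy = new byte[4];

        // copies at most copy.length bytes out of the observed slice
        ValueConsumer copier = (buffer, index, length) ->
            buffer.getBytes(index, copy, 0, Math.min(length, copy.length));

        copier.accept(new UnsafeBuffer(new byte[] {0, 0, 0, 42}), 0, 4);

        // the no-op consumer simply discards whatever it is handed
        ValueConsumer.NOP.accept(new UnsafeBuffer(new byte[0]), 0, 0);
    }
}
```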
 - */ -package io.aklivity.zilla.runtime.engine.validator; - -import static java.util.Objects.requireNonNull; -import static java.util.ServiceLoader.load; - -import java.util.Collection; -import java.util.Map; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.factory.Factory; - -public final class ValidatorFactory extends Factory -{ - private final Map<String, ValidatorFactorySpi> validatorSpis; - - public static ValidatorFactory instantiate() - { - return instantiate(load(ValidatorFactorySpi.class), ValidatorFactory::new); - } - - public Validator create( - ValidatorConfig config, - ToLongFunction<String> resolveId, - LongFunction<CatalogHandler> supplyCatalog) - { - String type = config.type; - requireNonNull(type, "name"); - - ValidatorFactorySpi validatorSpi = requireNonNull(validatorSpis.get(type), () -> "Unrecognized validator name: " + type); - - return validatorSpi.create(config, resolveId, supplyCatalog); - } - - public Collection<ValidatorFactorySpi> validatorSpis() - { - return validatorSpis.values(); - } - - private ValidatorFactory( - Map<String, ValidatorFactorySpi> validatorSpis) - { - this.validatorSpis = validatorSpis; - } -} diff --git a/runtime/engine/src/main/moditect/module-info.java b/runtime/engine/src/main/moditect/module-info.java index 3eb224c7a4..4c15cb57b3 100644 --- a/runtime/engine/src/main/moditect/module-info.java +++ b/runtime/engine/src/main/moditect/module-info.java @@ -21,16 +21,18 @@ exports io.aklivity.zilla.runtime.engine.binding; exports io.aklivity.zilla.runtime.engine.binding.function; exports io.aklivity.zilla.runtime.engine.catalog; + exports io.aklivity.zilla.runtime.engine.model; + exports io.aklivity.zilla.runtime.engine.model.function; exports io.aklivity.zilla.runtime.engine.exporter; exports io.aklivity.zilla.runtime.engine.factory; exports io.aklivity.zilla.runtime.engine.guard; + exports io.aklivity.zilla.runtime.engine.namespace; exports io.aklivity.zilla.runtime.engine.metrics; exports io.aklivity.zilla.runtime.engine.metrics.reader; exports io.aklivity.zilla.runtime.engine.reader; exports io.aklivity.zilla.runtime.engine.resolver; exports io.aklivity.zilla.runtime.engine.util.function; exports io.aklivity.zilla.runtime.engine.vault; - exports io.aklivity.zilla.runtime.engine.validator; exports io.aklivity.zilla.runtime.engine.ext; @@ -53,15 +55,15 @@ uses io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; uses io.aklivity.zilla.runtime.engine.config.CompositeBindingAdapterSpi; uses io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; - uses io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; + uses io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; uses io.aklivity.zilla.runtime.engine.config.WithConfigAdapterSpi; uses io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi; uses io.aklivity.zilla.runtime.engine.catalog.CatalogFactorySpi; + uses io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; uses io.aklivity.zilla.runtime.engine.exporter.ExporterFactorySpi; uses io.aklivity.zilla.runtime.engine.guard.GuardFactorySpi; uses io.aklivity.zilla.runtime.engine.metrics.MetricGroupFactorySpi; - uses io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; uses io.aklivity.zilla.runtime.engine.vault.VaultFactorySpi; uses io.aklivity.zilla.runtime.engine.ext.EngineExtSpi; uses io.aklivity.zilla.runtime.engine.resolver.ResolverFactorySpi; diff --git
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java index be612d1c49..7c2b8334cb 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java @@ -358,6 +358,62 @@ public void shouldWriteBindingWithTelemetry() "\"telemetry\":{\"metrics\":[\"test.counter\"]}}}")); } + @Test + public void shouldWriteBindingWithCatalog() + { + BindingConfig[] bindings = + { + BindingConfig.builder() + .namespace("test") + .name("test") + .type("test") + .kind(SERVER) + .catalog() + .name("catalog0") + .schema() + .subject("echo") + .build() + .build() + .build() + }; + + String text = jsonb.toJson(bindings); + + assertThat(text, not(nullValue())); + assertThat(text, equalTo("{\"test\":{\"type\":\"test\",\"kind\":\"server\",\"catalog\":" + + "[{\"catalog0\":[{\"subject\":\"echo\"}]}]}}")); + } + + @Test + public void shouldReadBindingWithCatalog() + { + String text = + "{" + + " \"test\":" + + " {" + + " \"type\": \"test\"," + + " \"kind\": \"server\"," + + " \"catalog\":" + + " {" + + " \"catalog0\":" + + " [" + + " {" + + " \"subject\": \"echo\"" + + " }" + + " ]" + + " }" + + " }" + + "}"; + + BindingConfig[] bindings = jsonb.fromJson(text, BindingConfig[].class); + + assertThat(bindings[0], not(nullValue())); + assertThat(bindings[0].name, equalTo("test")); + assertThat(bindings[0].kind, equalTo(SERVER)); + assertThat(bindings[0].catalogs, hasSize(1)); + assertThat(bindings[0].catalogs.stream().findFirst().get().name, equalTo("catalog0")); + } + @Test public void shouldWriteBindingWithRemoteServerKind() { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelFactoryTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelFactoryTest.java new file mode 100644 index 0000000000..552cd627c1 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelFactoryTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.internal.model; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ModelFactory; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestConverterHandler; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestModel; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestModelContext; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class ModelFactoryTest +{ + @Test + public void shouldLoadAndCreate() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("test", config); + + TestModelConfig converterConfig = TestModelConfig.builder().length(4).build(); + ModelContext context = new TestModelContext(mock(EngineContext.class)); + + assertThat(model, instanceOf(TestModel.class)); + assertThat(context.supplyReadConverterHandler(converterConfig), instanceOf(TestConverterHandler.class)); + assertThat(context.supplyWriteConverterHandler(converterConfig), instanceOf(TestConverterHandler.class)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelTest.java new file mode 100644 index 0000000000..94c3361c7b --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/ModelTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.internal.model; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestConverterHandler; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class ModelTest +{ + @Test + public void shouldValidateWithoutFlag() + { + LongFunction supplyCatalog = mock(LongFunction.class); + ModelConfig config = TestModelConfig.builder() + .length(4) + .catalog() + .name("test0") + .schema() + .id(1) + .build() + .build() + .read(true) + .build(); + ConverterHandler handler = new TestConverterHandler(TestModelConfig.class.cast(config), supplyCatalog); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0, 0, 0, 42}; + data.wrap(bytes, 0, bytes.length); + assertEquals(data.capacity(), handler.convert(data, 0, data.capacity(), ValueConsumer.NOP)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/config/ValidatorConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/config/ModelConfigAdapterTest.java similarity index 65% rename from runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/config/ValidatorConfigAdapterTest.java rename to runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/config/ModelConfigAdapterTest.java index d96794dd25..d88e442f4f 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/config/ValidatorConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/model/config/ModelConfigAdapterTest.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.engine.internal.validator.config; +package io.aklivity.zilla.runtime.engine.internal.model.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -27,18 +27,18 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapter; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapter; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; -public class ValidatorConfigAdapterTest +public class ModelConfigAdapterTest { private Jsonb jsonb; @Before public void initJson() { - ValidatorConfigAdapter adapter = new ValidatorConfigAdapter(); + ModelConfigAdapter adapter = new ModelConfigAdapter(); adapter.adaptType("test"); JsonbConfig config = new JsonbConfig() .withAdapters(adapter); @@ -46,31 +46,31 @@ public void initJson() } @Test - public void shouldReadValidator() + public void shouldReadConverter() { // GIVEN String json = "{" + - "\"type\": \"test\"" + + "\"model\": \"test\"" + "}"; // WHEN - ValidatorConfig validator = jsonb.fromJson(json, ValidatorConfig.class); + ModelConfig model = jsonb.fromJson(json, ModelConfig.class); // THEN - assertThat(validator, not(nullValue())); - assertThat(validator.type, equalTo("test")); + assertThat(model, not(nullValue())); + assertThat(model.model, equalTo("test")); } @Test - public void shouldWriteValidator() + public void shouldWriteConverter() { // GIVEN String expectedJson = "\"test\""; - ValidatorConfig validator = TestValidatorConfig.builder().build(); + ModelConfig model = TestModelConfig.builder().build(); // WHEN - String json = jsonb.toJson(validator); + String json = jsonb.toJson(model); // THEN assertThat(json, not(nullValue())); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/ValidatorFactoryTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/ValidatorFactoryTest.java deleted file mode 100644 index 7d37adebab..0000000000 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/validator/ValidatorFactoryTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc. - * - * Aklivity licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.aklivity.zilla.runtime.engine.internal.validator; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.mock; - -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; - -import org.junit.Test; - -import io.aklivity.zilla.runtime.engine.Configuration; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.test.internal.validator.TestValidator; -import io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactory; - -public class ValidatorFactoryTest -{ - @Test - @SuppressWarnings("unchecked") - public void shouldCreate() - { - // GIVEN - Configuration config = new Configuration(); - ValidatorConfig testValidator = new TestValidatorConfig(); - ToLongFunction resolveId = mock(ToLongFunction.class); - LongFunction supplyCatalog = mock(LongFunction.class); - ValidatorFactory factory = ValidatorFactory.instantiate(); - - // WHEN - Validator validator = factory.create(testValidator, resolveId, supplyCatalog); - - // THEN - assertThat(validator, instanceOf(TestValidator.class)); - } -} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecordTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecordTest.java index 7df1ee9698..528f5d77d3 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecordTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/HistogramRecordTest.java @@ -25,7 +25,7 @@ import org.junit.Test; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; public class HistogramRecordTest { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/MetricsReaderTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/MetricsReaderTest.java index 51da618d39..e8dbc87e04 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/MetricsReaderTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/MetricsReaderTest.java @@ -26,8 +26,8 @@ import org.junit.Test; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; import io.aklivity.zilla.runtime.engine.metrics.Collector; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; public class MetricsReaderTest { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecordTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecordTest.java index 4a67c2b4f1..d2cb22f3ea 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecordTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/metrics/reader/ScalarRecordTest.java @@ -25,7 +25,7 @@ import org.junit.Test; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; public class ScalarRecordTest { diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelFactoryTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelFactoryTest.java new file mode 100644 index 0000000000..5ce28f2008 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelFactoryTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.model; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.mock; + +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestModel; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestModelContext; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestValidatorHandler; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class ModelFactoryTest +{ + @Test + public void shouldLoadAndCreate() + { + Configuration config = new Configuration(); + ModelFactory factory = ModelFactory.instantiate(); + Model model = factory.create("test", config); + + TestModelConfig modelConfig = TestModelConfig.builder().length(4).build(); + ModelContext context = new TestModelContext(mock(EngineContext.class)); + + assertThat(model, instanceOf(TestModel.class)); + assertThat(context.supplyValidatorHandler(modelConfig), instanceOf(TestValidatorHandler.class)); + } + + @Test + public void shouldCreateNullValidator() + { + TestModelConfig config = TestModelConfig.builder().length(4).build(); + ModelContext context = new ModelContext() + { + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return null; + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return null; + } + }; + assertNull(context.supplyValidatorHandler(config)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelTest.java new file mode 100644 index 0000000000..93d4b93e23 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/ModelTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +import org.agrona.DirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.model.TestModelContext; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class ModelTest +{ + @Test + public void shouldCreateAndVerifyNoOpValueConverter() + { + ConverterHandler converter = ConverterHandler.NONE; + + assertEquals(1, converter.convert(new UnsafeBuffer(), 1, 1, (b, i, l) -> {})); + } + + @Test + public void shouldValidateWithoutFlag() + { + TestModelConfig modelConfig = TestModelConfig.builder() + .length(4) + .build(); + ModelContext context = new TestModelContext(mock(EngineContext.class)); + ValidatorHandler handler = context.supplyValidatorHandler(modelConfig); + + DirectBuffer data = new UnsafeBuffer(); + + byte[] bytes = {0, 0, 0, 42}; + data.wrap(bytes, 0, bytes.length); + assertTrue(handler.validate(data, 0, data.capacity(), ValueConsumer.NOP)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumerTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumerTest.java new file mode 100644 index 0000000000..4c3f5d76ca --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/model/function/ValueConsumerTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
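The ModelTest above pins down two defaults: ConverterHandler.NONE passes a value through unchanged and returns the given length, and the three-argument validate overload delegates to the five-argument one with FLAGS_COMPLETE. A sketch restating both (the handler argument is assumed):

```java
import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.ConverterHandler;
import io.aklivity.zilla.runtime.engine.model.ValidatorHandler;
import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;

public final class ModelDefaults
{
    public static boolean validateWhole(
        ValidatorHandler handler)   // assumed: any ValidatorHandler implementation
    {
        DirectBuffer data = new UnsafeBuffer(new byte[] {0, 0, 0, 42});

        // pass-through converter: returns the input length untouched
        int length = ConverterHandler.NONE.convert(data, 0, data.capacity(), ValueConsumer.NOP);
        assert length == data.capacity();

        // same as handler.validate(FLAGS_COMPLETE, data, 0, data.capacity(), ValueConsumer.NOP)
        return handler.validate(data, 0, data.capacity(), ValueConsumer.NOP);
    }
}
```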
+ */ +package io.aklivity.zilla.runtime.engine.model.function; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class ValueConsumerTest +{ + @Test + public void shouldDefaultOnMessageAndClose() + { + ValueConsumer next = (buffer, index, length) -> + { + assertNotNull(buffer); + assertTrue(index >= 0); + assertTrue(length >= 0); + }; + } + + @Test + public void shouldCreateNoOpValueConsumer() + { + ValueConsumer next = ValueConsumer.NOP; + assertNotNull(next); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactorySpi.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/DecoderTest.java similarity index 57% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactorySpi.java rename to runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/DecoderTest.java index 24455be435..176010c27f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/validator/ValidatorFactorySpi.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/DecoderTest.java @@ -13,24 +13,23 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.validator; +package io.aklivity.zilla.runtime.engine.test.internal.catalog; -import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; +import static org.junit.Assert.assertEquals; + +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.factory.FactorySpi; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; -public interface ValidatorFactorySpi extends FactorySpi +public class DecoderTest { - String type(); - - URL schema(); + @Test + public void shouldCreateAndVerifyIdentityDecoder() + { + CatalogHandler.Decoder decoder = CatalogHandler.Decoder.IDENTITY; - Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog); + assertEquals(1, decoder.accept(1, new UnsafeBuffer(), 1, 1, ValueConsumer.NOP)); + } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/EncoderTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/EncoderTest.java new file mode 100644 index 0000000000..aec980f6ff --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/EncoderTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.test.internal.catalog; + +import static org.junit.Assert.assertEquals; + +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; + +public class EncoderTest +{ + @Test + public void shouldCreateAndVerifyIdentityEncoder() + { + CatalogHandler.Encoder encoder = CatalogHandler.Encoder.IDENTITY; + + assertEquals(1, encoder.accept(1, new UnsafeBuffer(), 1, 1, ValueConsumer.NOP)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/TestCatalogHandler.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/TestCatalogHandler.java index 9bf906f40c..a79668085a 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/TestCatalogHandler.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/TestCatalogHandler.java @@ -21,10 +21,12 @@ public class TestCatalogHandler implements CatalogHandler { private final String schema; + private final int id; public TestCatalogHandler( TestCatalogOptionsConfig options) { + this.id = options != null ? options.id : NO_SCHEMA_ID; this.schema = options != null ? options.schema : null; } @@ -34,7 +36,7 @@ public int register( String type, String schema) { - return 1; + return id; } @Override @@ -42,13 +44,13 @@ public int resolve( String subject, String version) { - return 1; + return id; } @Override public String resolve( int schemaId) { - return schema; + return schemaId == id ? schema : null; } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfig.java index 447bc5d701..c42ba3d0e9 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfig.java @@ -21,7 +21,9 @@ public class TestCatalogOptionsConfig extends OptionsConfig { + public final String subject; public final String schema; + public final int id; public static TestCatalogOptionsConfigBuilder builder() { @@ -35,8 +37,12 @@ public static TestCatalogOptionsConfigBuilder builder( } public TestCatalogOptionsConfig( + int id, + String subject, String schema) { + this.subject = subject; this.schema = schema; + this.id = id; } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigAdapter.java index 6e1a96f7ac..b1c84d4003 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigAdapter.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigAdapter.java @@ -24,7 +24,9 @@ public class TestCatalogOptionsConfigAdapter implements OptionsConfigAdapterSpi { + private static final String SUBJECT = "subject"; private static final String SCHEMA = "schema"; + private static final String ID = "id"; @Override public Kind kind() @@ -58,16 +60,24 
@@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - TestCatalogOptionsConfigBuilder<TestCatalogOptionsConfig> testOptions = TestCatalogOptionsConfig.builder(); + TestCatalogOptionsConfigBuilder<TestCatalogOptionsConfig> config = TestCatalogOptionsConfig.builder(); if (object != null) { + if (object.containsKey(SUBJECT)) + { + config.subject(object.getString(SUBJECT)); + } + if (object.containsKey(SCHEMA)) { - testOptions.schema(object.getString(SCHEMA)); + config.schema(object.getString(SCHEMA)); } - } - return testOptions.build(); + config.id(object.containsKey(ID) + ? object.getInt(ID) + : 0); + } + return config.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigBuilder.java index b63940c387..61011f7bbf 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigBuilder.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/catalog/config/TestCatalogOptionsConfigBuilder.java @@ -24,7 +24,9 @@ public final class TestCatalogOptionsConfigBuilder<T> extends ConfigBuilder<T, TestCatalogOptionsConfigBuilder<T>> private final Function<OptionsConfig, T> mapper; + private String subject; private String schema; + private int id; TestCatalogOptionsConfigBuilder( Function<OptionsConfig, T> mapper) @@ -39,6 +41,13 @@ protected Class<TestCatalogOptionsConfigBuilder<T>> thisType() return (Class<TestCatalogOptionsConfigBuilder<T>>) getClass(); } + public TestCatalogOptionsConfigBuilder<T> subject( + String subject) + { + this.subject = subject; + return this; + } + public TestCatalogOptionsConfigBuilder<T> schema( String schema) { @@ -46,9 +55,16 @@ public TestCatalogOptionsConfigBuilder<T> schema( return this; } + public TestCatalogOptionsConfigBuilder<T> id( + int id) + { + this.id = id; + return this; + } + @Override public T build() { - return mapper.apply(new TestCatalogOptionsConfig(schema)); + return mapper.apply(new TestCatalogOptionsConfig(id, subject, schema)); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/k3po/ext/behavior/ZillaScope.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/k3po/ext/behavior/ZillaScope.java index 47ea8a7753..1a6bbf5886 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/k3po/ext/behavior/ZillaScope.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/k3po/ext/behavior/ZillaScope.java @@ -36,7 +36,7 @@ import io.aklivity.zilla.runtime.engine.internal.budget.DefaultBudgetCreditor; import io.aklivity.zilla.runtime.engine.internal.budget.DefaultBudgetDebitor; import io.aklivity.zilla.runtime.engine.internal.layouts.BudgetsLayout; -import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; +import io.aklivity.zilla.runtime.engine.namespace.NamespacedId; import io.aklivity.zilla.runtime.engine.test.internal.k3po.ext.ZillaExtConfiguration; import io.aklivity.zilla.runtime.engine.test.internal.k3po.ext.behavior.layout.StreamsLayout; import io.aklivity.zilla.runtime.engine.test.internal.k3po.ext.types.stream.FlushFW; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestConverterHandler.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestConverterHandler.java new file mode 100644 index 0000000000..63de57e233 --- /dev/null +++
b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestConverterHandler.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.test.internal.model; + +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class TestConverterHandler implements ConverterHandler +{ + private final int length; + private final int schemaId; + private final boolean read; + private final CatalogHandler handler; + private final SchemaConfig schema; + + public TestConverterHandler( + TestModelConfig config, + LongFunction<CatalogHandler> supplyCatalog) + { + this.length = config.length; + this.read = config.read; + CatalogedConfig cataloged = config.cataloged != null && !config.cataloged.isEmpty() + ? config.cataloged.get(0) + : null; + schema = cataloged != null ? cataloged.schemas.get(0) : null; + schemaId = schema != null ? schema.id : 0; + this.handler = cataloged != null ? supplyCatalog.apply(cataloged.id) : null; + } + + @Override + public int padding( + DirectBuffer data, + int index, + int length) + { + return handler.encodePadding(); + } + + @Override + public int convert( + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + boolean valid = length == this.length; + if (valid) + { + next.accept(data, index, length); + } + return valid ? length : -1; + } +} + diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModel.java similarity index 51% rename from runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfig.java rename to runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModel.java index 59622c3bb5..80590ffceb 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModel.java @@ -13,27 +13,34 @@ * License for the specific language governing permissions and limitations * under the License.
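TestConverterHandler above illustrates the convert contract: on success the value is forwarded to the ValueConsumer and the converted length is returned, while -1 signals an invalid message with nothing forwarded. A sketch of a caller branching on that return (the handler argument is assumed, constructed as in the tests above):

```java
import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

import io.aklivity.zilla.runtime.engine.model.ConverterHandler;
import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer;

public final class ConvertContract
{
    public static boolean convertOnce(
        ConverterHandler handler)   // assumed: any ConverterHandler implementation
    {
        DirectBuffer data = new UnsafeBuffer(new byte[] {0, 0, 0, 42});

        // >= 0: converted length, value already forwarded to the consumer
        // -1: invalid message, nothing forwarded
        int converted = handler.convert(data, 0, data.capacity(), ValueConsumer.NOP);
        return converted != -1;
    }
}
```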
*/ -package io.aklivity.zilla.runtime.engine.test.internal.validator.config; +package io.aklivity.zilla.runtime.engine.test.internal.model; -import java.util.function.Function; +import java.net.URL; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelContext; -public class TestValidatorConfig extends ValidatorConfig +public class TestModel implements Model { - public TestValidatorConfig() + public static final String NAME = "test"; + + @Override + public String name() { - super("test"); + return NAME; } - public static TestValidatorConfigBuilder builder( - Function mapper) + @Override + public ModelContext supply( + EngineContext context) { - return new TestValidatorConfigBuilder<>(mapper); + return new TestModelContext(context); } - public static TestValidatorConfigBuilder builder() + @Override + public URL type() { - return new TestValidatorConfigBuilder<>(TestValidatorConfig.class::cast); + return getClass().getResource("test.schema.patch.json"); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelContext.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelContext.java new file mode 100644 index 0000000000..b15c72a9b4 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelContext.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
 + */ +package io.aklivity.zilla.runtime.engine.test.internal.model; + +import java.util.function.LongFunction; + +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.model.ConverterHandler; +import io.aklivity.zilla.runtime.engine.model.ModelContext; +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class TestModelContext implements ModelContext +{ + private final LongFunction<CatalogHandler> supplyCatalog; + + public TestModelContext( + EngineContext context) + { + this.supplyCatalog = context::supplyCatalog; + } + + @Override + public ConverterHandler supplyReadConverterHandler( + ModelConfig config) + { + return new TestConverterHandler(TestModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ConverterHandler supplyWriteConverterHandler( + ModelConfig config) + { + return new TestConverterHandler(TestModelConfig.class.cast(config), supplyCatalog); + } + + @Override + public ValidatorHandler supplyValidatorHandler( + ModelConfig config) + { + return new TestValidatorHandler(TestModelConfig.class.cast(config)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidatorFactory.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelFactorySpi.java similarity index 56% rename from runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidatorFactory.java rename to runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelFactorySpi.java index 25b56bd494..d1a9e73c57 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/TestValidatorFactory.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestModelFactorySpi.java @@ -13,18 +13,15 @@ * License for the specific language governing permissions and limitations * under the License.
*/ -package io.aklivity.zilla.runtime.engine.test.internal.validator; +package io.aklivity.zilla.runtime.engine.test.internal.model; import java.net.URL; -import java.util.function.LongFunction; -import java.util.function.ToLongFunction; -import io.aklivity.zilla.runtime.engine.catalog.CatalogHandler; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.validator.Validator; -import io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi; +import io.aklivity.zilla.runtime.engine.Configuration; +import io.aklivity.zilla.runtime.engine.model.Model; +import io.aklivity.zilla.runtime.engine.model.ModelFactorySpi; -public class TestValidatorFactory implements ValidatorFactorySpi +public class TestModelFactorySpi implements ModelFactorySpi { @Override public String type() @@ -39,11 +36,9 @@ public URL schema() } @Override - public Validator create( - ValidatorConfig config, - ToLongFunction resolveId, - LongFunction supplyCatalog) + public Model create( + Configuration config) { - return new TestValidator(); + return new TestModel(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestValidatorHandler.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestValidatorHandler.java new file mode 100644 index 0000000000..76b2ded963 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/TestValidatorHandler.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.test.internal.model; + +import org.agrona.DirectBuffer; + +import io.aklivity.zilla.runtime.engine.model.ValidatorHandler; +import io.aklivity.zilla.runtime.engine.model.function.ValueConsumer; +import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig; + +public class TestValidatorHandler implements ValidatorHandler +{ + private final int length; + private int pendingBytes; + + public TestValidatorHandler( + TestModelConfig config) + { + this.length = config.length; + } + + @Override + public boolean validate( + int flags, + DirectBuffer data, + int index, + int length, + ValueConsumer next) + { + boolean valid = false; + + pendingBytes = this.length - length; + + if ((flags & FLAGS_FIN) != 0x00) + { + valid = pendingBytes == 0; + } + else + { + valid = pendingBytes >= 0; + } + return valid; + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfig.java new file mode 100644 index 0000000000..73f0f06c7e --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfig.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
 + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.test.internal.model.config; + +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; + +public class TestModelConfig extends ModelConfig +{ + public final int length; + public final boolean read; + + public TestModelConfig( + int length, + List<CatalogedConfig> cataloged, + boolean read) + { + super("test", cataloged); + this.length = length; + this.read = read; + } + + public static <T> TestModelConfigBuilder<T> builder( + Function<ModelConfig, T> mapper) + { + return new TestModelConfigBuilder<>(mapper); + } + + public static TestModelConfigBuilder<TestModelConfig> builder() + { + return new TestModelConfigBuilder<>(TestModelConfig.class::cast); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigAdapter.java new file mode 100644 index 0000000000..c9c0778e00 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigAdapter.java @@ -0,0 +1,90 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License.
+ */ +package io.aklivity.zilla.runtime.engine.test.internal.model.config; + +import java.util.LinkedList; +import java.util.List; + +import jakarta.json.Json; +import jakarta.json.JsonArray; +import jakarta.json.JsonObject; +import jakarta.json.JsonValue; +import jakarta.json.bind.adapter.JsonbAdapter; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; +import io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi; +import io.aklivity.zilla.runtime.engine.config.SchemaConfig; +import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; + +public class TestModelConfigAdapter implements ModelConfigAdapterSpi, JsonbAdapter +{ + private static final String TEST = "test"; + private static final String LENGTH = "length"; + private static final String CAPABILITY = "capability"; + private static final String READ = "read"; + private static final String CATALOG_NAME = "catalog"; + + private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); + + @Override + public String type() + { + return TEST; + } + + @Override + public JsonValue adaptToJson( + ModelConfig config) + { + return Json.createValue(TEST); + } + + @Override + public TestModelConfig adaptFromJson( + JsonValue value) + { + JsonObject object = (JsonObject) value; + + int length = object.containsKey(LENGTH) + ? object.getInt(LENGTH) + : 0; + + boolean read = object.containsKey(CAPABILITY) + ? object.getString(CAPABILITY).equals(READ) + : false; + + List catalogs = new LinkedList<>(); + if (object.containsKey(CATALOG_NAME)) + { + JsonObject catalogsJson = object.getJsonObject(CATALOG_NAME); + for (String catalogName: catalogsJson.keySet()) + { + JsonArray schemasJson = catalogsJson.getJsonArray(catalogName); + List schemas = new LinkedList<>(); + for (JsonValue item : schemasJson) + { + JsonObject schemaJson = (JsonObject) item; + SchemaConfig schemaElement = schema.adaptFromJson(schemaJson); + schemas.add(schemaElement); + } + catalogs.add(new CatalogedConfig(catalogName, schemas)); + } + } + + return new TestModelConfig(length, catalogs, read); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigBuilder.java new file mode 100644 index 0000000000..5f3842fd3d --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/model/config/TestModelConfigBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
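The adaptFromJson above reads three optional keys: length, capability (the value read enables read capability), and catalog, a map of catalog name to a list of schema objects. A sketch that builds such an object with jakarta.json and runs it through the adapter directly (all values are illustrative):

```java
import jakarta.json.Json;
import jakarta.json.JsonObject;

import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfig;
import io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfigAdapter;

public final class AdapterUsage
{
    public static TestModelConfig parse()
    {
        // mirrors the object form the adapter accepts; values are illustrative
        JsonObject object = Json.createObjectBuilder()
            .add("length", 4)
            .add("capability", "read")
            .add("catalog", Json.createObjectBuilder()
                .add("catalog0", Json.createArrayBuilder()
                    .add(Json.createObjectBuilder().add("subject", "echo"))))
            .build();

        // yields length == 4, read == true, one cataloged config named "catalog0"
        return new TestModelConfigAdapter().adaptFromJson(object);
    }
}
```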
 + */ +package io.aklivity.zilla.runtime.engine.test.internal.model.config; + +import java.util.LinkedList; +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.CatalogedConfig; +import io.aklivity.zilla.runtime.engine.config.CatalogedConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ModelConfig; + +public class TestModelConfigBuilder<T> extends ConfigBuilder<T, TestModelConfigBuilder<T>> +{ + private final Function<ModelConfig, T> mapper; + + private int length; + private boolean read; + private List<CatalogedConfig> catalogs; + + TestModelConfigBuilder( + Function<ModelConfig, T> mapper) + { + this.mapper = mapper; + } + + @Override + @SuppressWarnings("unchecked") + protected Class<TestModelConfigBuilder<T>> thisType() + { + return (Class<TestModelConfigBuilder<T>>) getClass(); + } + + public TestModelConfigBuilder<T> length( + int length) + { + this.length = length; + return this; + } + + public TestModelConfigBuilder<T> read( + boolean read) + { + this.read = read; + return this; + } + + public CatalogedConfigBuilder<TestModelConfigBuilder<T>> catalog() + { + return CatalogedConfig.builder(this::catalog); + } + + public TestModelConfigBuilder<T> catalog( + CatalogedConfig catalog) + { + if (catalogs == null) + { + catalogs = new LinkedList<>(); + } + catalogs.add(catalog); + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestModelConfig(length, catalogs, read)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigAdapter.java deleted file mode 100644 index 0c10b52f5b..0000000000 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigAdapter.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc. - * - * Aklivity licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License.
- */ -package io.aklivity.zilla.runtime.engine.test.internal.validator.config; - -import jakarta.json.Json; -import jakarta.json.JsonValue; -import jakarta.json.bind.adapter.JsonbAdapter; - -import io.aklivity.zilla.runtime.engine.config.SchemaConfigAdapter; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; -import io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi; - -public class TestValidatorConfigAdapter implements ValidatorConfigAdapterSpi, JsonbAdapter -{ - private static final String TEST = "test"; - - private final SchemaConfigAdapter schema = new SchemaConfigAdapter(); - - @Override - public String type() - { - return TEST; - } - - @Override - public JsonValue adaptToJson( - ValidatorConfig config) - { - return Json.createValue(TEST); - } - - @Override - public ValidatorConfig adaptFromJson( - JsonValue value) - { - return TestValidatorConfig.builder().build(); - } -} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigBuilder.java deleted file mode 100644 index d8a27e4942..0000000000 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/validator/config/TestValidatorConfigBuilder.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc. - * - * Aklivity licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.aklivity.zilla.runtime.engine.test.internal.validator.config; - -import java.util.function.Function; - -import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; - -import io.aklivity.zilla.runtime.engine.config.ValidatorConfig; - -public class TestValidatorConfigBuilder<T> extends ConfigBuilder<T, TestValidatorConfigBuilder<T>> -{ - private final Function<ValidatorConfig, T> mapper; - - TestValidatorConfigBuilder( - Function<ValidatorConfig, T> mapper) - { - this.mapper = mapper; - } - - @Override - @SuppressWarnings("unchecked") - protected Class<TestValidatorConfigBuilder<T>> thisType() - { - return (Class<TestValidatorConfigBuilder<T>>) getClass(); - } - - @Override - public T build() - { - return mapper.apply(new TestValidatorConfig()); - } -} diff --git a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi new file mode 100644 index 0000000000..97ebb30d31 --- /dev/null +++ b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ModelConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.engine.test.internal.model.config.TestModelConfigAdapter diff --git a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi deleted file mode 100644 index f41416a365..0000000000 --- a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ValidatorConfigAdapterSpi +++ /dev/null @@ -1 +0,0 @@ -io.aklivity.zilla.runtime.engine.test.internal.validator.config.TestValidatorConfigAdapter diff --git a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi new file mode 100644 index 0000000000..b161468dac --- /dev/null +++ b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.model.ModelFactorySpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.engine.test.internal.model.TestModelFactorySpi diff --git a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi index 159cbd9dba..97cbe9bfbf 100644 --- a/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi +++ b/runtime/engine/src/test/resources/META-INF/services/io.aklivity.zilla.runtime.engine.validator.ValidatorFactorySpi @@ -1 +1 @@ -io.aklivity.zilla.runtime.engine.test.internal.validator.TestValidatorFactory +io.aklivity.zilla.runtime.engine.test.internal.validator.TestValidatorFactorySpi diff --git a/runtime/exporter-prometheus/pom.xml b/runtime/exporter-prometheus/pom.xml index eb1ae22f1b..475b40d7d8 100644 --- a/runtime/exporter-prometheus/pom.xml +++ b/runtime/exporter-prometheus/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/guard-jwt/pom.xml b/runtime/guard-jwt/pom.xml index dca2ac63c1..a3a99e84dc 100644 --- a/runtime/guard-jwt/pom.xml +++ b/runtime/guard-jwt/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/metrics-grpc/pom.xml b/runtime/metrics-grpc/pom.xml index 656e054f19..bb11a0a1b0 100644 ---
a/runtime/metrics-grpc/pom.xml +++ b/runtime/metrics-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/metrics-http/pom.xml b/runtime/metrics-http/pom.xml index ca8f82823f..5ec0b35b06 100644 --- a/runtime/metrics-http/pom.xml +++ b/runtime/metrics-http/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/metrics-stream/pom.xml b/runtime/metrics-stream/pom.xml index a694454652..f2d7486432 100644 --- a/runtime/metrics-stream/pom.xml +++ b/runtime/metrics-stream/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/pom.xml b/runtime/pom.xml index 2c8d0f9a9f..45180a380f 100644 --- a/runtime/pom.xml +++ b/runtime/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/resolver-env/pom.xml b/runtime/resolver-env/pom.xml index d3876c079b..08000d35df 100644 --- a/runtime/resolver-env/pom.xml +++ b/runtime/resolver-env/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/runtime/vault-filesystem/pom.xml b/runtime/vault-filesystem/pom.xml index 3584a0cfdb..8ef3f7866d 100644 --- a/runtime/vault-filesystem/pom.xml +++ b/runtime/vault-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-echo.spec/pom.xml b/specs/binding-echo.spec/pom.xml index 1a12649d7f..0eeb899f06 100644 --- a/specs/binding-echo.spec/pom.xml +++ b/specs/binding-echo.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-fan.spec/pom.xml b/specs/binding-fan.spec/pom.xml index af560f1162..ae86d81d8a 100644 --- a/specs/binding-fan.spec/pom.xml +++ b/specs/binding-fan.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-filesystem.spec/pom.xml b/specs/binding-filesystem.spec/pom.xml index 747de52d7f..9f1b70da7c 100644 --- a/specs/binding-filesystem.spec/pom.xml +++ b/specs/binding-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-grpc-kafka.spec/pom.xml b/specs/binding-grpc-kafka.spec/pom.xml index 4078ac4848..adc090a284 100644 --- a/specs/binding-grpc-kafka.spec/pom.xml +++ b/specs/binding-grpc-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-grpc.spec/pom.xml b/specs/binding-grpc.spec/pom.xml index fb4f732c3c..afd883b3b2 100644 --- a/specs/binding-grpc.spec/pom.xml +++ b/specs/binding-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/config/server.when.catalog.yaml b/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/config/server.when.catalog.yaml new file mode 100644 index 0000000000..aa3321b6d7 --- /dev/null +++ b/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/config/server.when.catalog.yaml @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +--- +name: test +catalogs: + catalog0: + type: test + options: + id: 1 + subject: echo + schema: | + syntax = "proto3"; + + package example; + + option java_multiple_files = true; + option java_outer_classname = "EchoProto"; + + service EchoService + { + rpc EchoUnary(EchoMessage) returns (EchoMessage); + + rpc EchoClientStream(stream EchoMessage) returns (EchoMessage); + + rpc EchoServerStream( EchoMessage) returns (stream EchoMessage); + + rpc EchoStream(stream EchoMessage) returns (stream EchoMessage); + } + + message EchoMessage + { + string message = 1; + } +bindings: + net0: + type: grpc + kind: server + catalog: + catalog0: + - subject: echo + routes: + - exit: app0 + when: + - method: example.EchoService/* + metadata: + custom: test diff --git a/specs/binding-http-filesystem.spec/pom.xml b/specs/binding-http-filesystem.spec/pom.xml index 578bf2d4a6..dafcf7b0e0 100644 --- a/specs/binding-http-filesystem.spec/pom.xml +++ b/specs/binding-http-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-http-kafka.spec/pom.xml b/specs/binding-http-kafka.spec/pom.xml index be63de9b6e..b6b90a0724 100644 --- a/specs/binding-http-kafka.spec/pom.xml +++ b/specs/binding-http-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-http.spec/pom.xml b/specs/binding-http.spec/pom.xml index 5871e04aa7..cb62714090 100644 --- a/specs/binding-http.spec/pom.xml +++ b/specs/binding-http.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/client.validation.yaml b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/client.validation.yaml index 11a85eec9d..8e372ef168 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/client.validation.yaml +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/client.validation.yaml @@ -29,8 +29,12 @@ bindings: content-type: - text/plain headers: - x-hello: test - content: test + x-hello: + model: test + length: 13 + content: + model: test + length: 13 versions: - http/1.1 exit: net0 diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.validation.yaml b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.model.yaml similarity index 72% rename from specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.validation.yaml rename to specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.model.yaml index 16956d2571..f70ed53ed2 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.validation.yaml +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v1.1/server.model.yaml @@ -24,21 +24,32 @@ bindings: requests: - path: 
/hello method: GET - content: test + content: + model: test + length: 13 - path: /valid/{category}/{id} method: POST content-type: - text/plain headers: - code: test + code: + model: test + length: 13 params: path: - category: test - id: test + category: + model: test + length: 13 + id: + model: test + length: 13 query: - page: test + page: + model: test + length: 13 content: - type: test + model: test + length: 13 versions: - http/1.1 routes: diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/client.validation.yaml b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/client.validation.yaml index 91fdc73388..abd740a5c0 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/client.validation.yaml +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/client.validation.yaml @@ -29,8 +29,12 @@ bindings: content-type: - text/plain headers: - x-hello: test - content: test + x-hello: + model: test + length: 13 + content: + model: test + length: 13 versions: - h2 exit: net0 diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.validation.yaml b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.model.yaml similarity index 72% rename from specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.validation.yaml rename to specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.model.yaml index a925071f02..ad507a7721 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.validation.yaml +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/config/v2/server.model.yaml @@ -24,21 +24,32 @@ bindings: requests: - path: /hello method: GET - content: test + content: + model: test + length: 13 - path: /valid/{category}/{id} method: POST content-type: - text/plain headers: - code: test + code: + model: test + length: 13 params: path: - category: test - id: test + category: + model: test + length: 13 + id: + model: test + length: 13 query: - page: test + page: + model: test + length: 13 content: - type: test + model: test + length: 13 versions: - h2 routes: diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/schema/http.schema.patch.json b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/schema/http.schema.patch.json index fe229928c2..2ae7449415 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/schema/http.schema.patch.json +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/schema/http.schema.patch.json @@ -280,7 +280,7 @@ { "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } } }, @@ -296,7 +296,7 @@ { "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } } }, @@ -307,7 +307,7 @@ { "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } } } @@ -316,7 +316,7 @@ }, "content": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" }, "responses": { @@ -357,13 +357,13 @@ { "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } } }, "content": { - 
"$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } }, "anyOf": diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/client.rpt b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/client.rpt index 1b1f7930cb..74a6468019 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/client.rpt +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/client.rpt @@ -33,4 +33,5 @@ read zilla:begin.ext ${http:matchBeginEx() .header("retry-after", "0") .build()} +write aborted read closed diff --git a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/server.rpt b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/server.rpt index 5b5a6c2a22..31c1935352 100644 --- a/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/server.rpt +++ b/specs/binding-http.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/streams/application/rfc7540/validation/invalid.response.header/server.rpt @@ -34,4 +34,5 @@ write zilla:begin.ext ${http:beginEx() .header("retry-after", "0") .build()} +read abort write close diff --git a/specs/binding-kafka-grpc.spec/pom.xml b/specs/binding-kafka-grpc.spec/pom.xml index 1617569505..f50ae3ac8e 100644 --- a/specs/binding-kafka-grpc.spec/pom.xml +++ b/specs/binding-kafka-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-kafka.spec/pom.xml b/specs/binding-kafka.spec/pom.xml index 6097874014..e1d35390af 100644 --- a/specs/binding-kafka.spec/pom.xml +++ b/specs/binding-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 0991539035..d6016eafc5 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -81,6 +81,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupTopicMetadataFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaInitProducerIdBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedConsumerFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; @@ -88,6 +89,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFetchFlushExFW; import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedProduceDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedProduceFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetCommitBeginExFW; @@ -974,6 +976,13 @@ public KafkaOffsetCommitBeginExBuilder offsetCommit() return new KafkaOffsetCommitBeginExBuilder(); } + public KafkaInitProducerIdBeginExBuilder initProducerId() + { + beginExRW.kind(KafkaApi.INIT_PRODUCER_ID.value()); + + return new KafkaInitProducerIdBeginExBuilder(); + } + public byte[] build() { final KafkaBeginExFW beginEx = beginExRO; @@ -1328,14 +1337,6 @@ public KafkaProduceBeginExBuilder transaction( return this; } - public KafkaProduceBeginExBuilder producerId( - long producerId) - { - ensureTransactionSet(); - produceBeginExRW.producerId(producerId); - return this; - } - public KafkaProduceBeginExBuilder topic( String topic) { @@ -1574,13 +1575,6 @@ private KafkaOffsetCommitBeginExBuilder() offsetCommitBeginExRW.wrap(writeBuffer, KafkaBeginExFW.FIELD_OFFSET_OFFSET_COMMIT, writeBuffer.capacity()); } - public KafkaOffsetCommitBeginExBuilder topic( - String topic) - { - offsetCommitBeginExRW.topic(topic); - return this; - } - public KafkaOffsetCommitBeginExBuilder groupId( String groupId) { @@ -1602,6 +1596,20 @@ public KafkaOffsetCommitBeginExBuilder instanceId( return this; } + public KafkaOffsetCommitBeginExBuilder host( + String host) + { + offsetCommitBeginExRW.host(host); + return this; + } + + public KafkaOffsetCommitBeginExBuilder port( + int port) + { + offsetCommitBeginExRW.port(port); + return this; + } + public KafkaBeginExBuilder build() { final KafkaOffsetCommitBeginExFW offsetCommitBeginEx = offsetCommitBeginExRW.build(); @@ -1609,6 +1617,39 @@ public KafkaBeginExBuilder build() return KafkaBeginExBuilder.this; } } + + public final class KafkaInitProducerIdBeginExBuilder + { + private final KafkaInitProducerIdBeginExFW.Builder initProduceIdBeginExRW = + new KafkaInitProducerIdBeginExFW.Builder(); + + private KafkaInitProducerIdBeginExBuilder() + { + initProduceIdBeginExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_OFFSET_FETCH, writeBuffer.capacity()); + } + + + public KafkaInitProducerIdBeginExBuilder producerId( + long producerId) + { + initProduceIdBeginExRW.producerId(producerId); + return this; + } + + public KafkaInitProducerIdBeginExBuilder producerEpoch( + short producerEpoch) + { + initProduceIdBeginExRW.producerEpoch(producerEpoch); + return this; + } + + public KafkaBeginExBuilder build() + { + KafkaInitProducerIdBeginExFW initProduceIdBeginEx = initProduceIdBeginExRW.build(); + beginExRO.wrap(writeBuffer, 0, initProduceIdBeginEx.limit()); + return KafkaBeginExBuilder.this; + } + } } public static final class KafkaDataExBuilder @@ -2102,6 +2143,20 @@ public KafkaMergedProduceDataExBuilder timestamp( return this; } + public KafkaMergedProduceDataExBuilder producerId( + long producerId) + { + mergedProduceDataExRW.producerId(producerId); + return this; + } + + public KafkaMergedProduceDataExBuilder producerEpoch( + short producerEpoch) + { + mergedProduceDataExRW.producerEpoch(producerEpoch); + return this; + } + public KafkaMergedProduceDataExBuilder partition( int partitionId, @@ -2354,6 +2409,20 @@ 
public KafkaProduceDataExBuilder timestamp( return this; } + public KafkaProduceDataExBuilder producerId( + long producerId) + { + produceDataExRW.producerId(producerId); + return this; + } + + public KafkaProduceDataExBuilder producerEpoch( + short producerEpoch) + { + produceDataExRW.producerEpoch(producerEpoch); + return this; + } + public KafkaProduceDataExBuilder sequence( int sequence) { @@ -2491,6 +2560,18 @@ private KafkaOffsetFetchDataExBuilder() offsetFetchDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_OFFSET_FETCH, writeBuffer.capacity()); } + public KafkaOffsetFetchDataExBuilder partition( + int partitionId, + long partitionOffset, + int leaderEpoch) + { + offsetFetchDataExRW.partitionsItem(o -> o + .partitionId(partitionId) + .partitionOffset(partitionOffset) + .leaderEpoch(leaderEpoch)); + return this; + } + public KafkaOffsetFetchDataExBuilder partition( int partitionId, long partitionOffset, @@ -2522,6 +2603,13 @@ private KafkaOffsetCommitDataExBuilder() offsetCommitDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_OFFSET_COMMIT, writeBuffer.capacity()); } + public KafkaOffsetCommitDataExBuilder topic( + String topic) + { + offsetCommitDataExRW.topic(topic); + return this; + } + public KafkaOffsetCommitDataExBuilder progress( int partitionId, long partitionOffset, @@ -2630,6 +2718,13 @@ private KafkaMergedFlushExBuilder() mergedFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_MERGED, writeBuffer.capacity()); } + public KafkaMergedProduceFlushExBuilder produce() + { + mergedFlushExRW.kind(KafkaApi.PRODUCE.value()); + + return new KafkaMergedProduceFlushExBuilder(); + } + public KafkaMergedFetchFlushExBuilder fetch() { mergedFlushExRW.kind(KafkaApi.FETCH.value()); @@ -2765,6 +2860,50 @@ public KafkaFlushExBuilder build() } } + public final class KafkaMergedProduceFlushExBuilder + { + private final KafkaMergedProduceFlushExFW.Builder mergedProduceFlushExRW = + new KafkaMergedProduceFlushExFW.Builder(); + + private KafkaMergedProduceFlushExBuilder() + { + mergedProduceFlushExRW.wrap(writeBuffer, + KafkaFlushExFW.FIELD_OFFSET_MERGED + KafkaMergedFlushExFW.FIELD_OFFSET_PRODUCE, + writeBuffer.capacity()); + } + + public KafkaMergedProduceFlushExBuilder hashKey( + String hashKey) + { + if (hashKey == null) + { + mergedProduceFlushExRW.hashKey(m -> m.length(-1) + .value((OctetsFW) null)); + } + else + { + keyRO.wrap(hashKey.getBytes(UTF_8)); + mergedProduceFlushExRW.hashKey(k -> k.length(keyRO.capacity()) + .value(keyRO, 0, keyRO.capacity())); + } + return this; + } + + public KafkaMergedProduceFlushExBuilder partitionId( + int partitionId) + { + mergedProduceFlushExRW.partitionId(partitionId); + return this; + } + + public KafkaFlushExBuilder build() + { + final KafkaMergedProduceFlushExFW mergedProduceFlushEx = mergedProduceFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedProduceFlushEx.limit()); + return KafkaFlushExBuilder.this; + } + } + public final class KafkaMergedConsumerFlushExBuilder { private final KafkaMergedConsumerFlushExFW.Builder mergedConsumerFlushExRW = @@ -3415,6 +3554,8 @@ public final class KafkaProduceDataExMatcherBuilder { private Integer deferred; private Long timestamp; + private Long producerId; + private Short producerEpoch; private Integer sequence; private KafkaAckMode ackMode; private KafkaKeyFW.Builder keyRW; @@ -3438,6 +3579,20 @@ public KafkaProduceDataExMatcherBuilder timestamp( return this; } + public KafkaProduceDataExMatcherBuilder producerId( + long producerId) + { + this.producerId = producerId; + return this; + } + 
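// Sketch: these producer matchers are driven from .rpt scripts, roughly as
// below (the topic context and numeric values are illustrative, not taken
// from this change):
//
//   read zilla:data.ext ${kafka:matchDataEx()
//                             .typeId(zilla:id("kafka"))
//                             .produce()
//                                 .producerId(1)
//                                 .producerEpoch(0)
//                                 .build()
//                             .build()}
//
// A field left unset acts as a wildcard, since each matchXxx check below
// passes when its field is null.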
+ public KafkaProduceDataExMatcherBuilder producerEpoch( + short producerEpoch) + { + this.producerEpoch = producerEpoch; + return this; + } + public KafkaProduceDataExMatcherBuilder sequence( int sequence) { @@ -3534,6 +3689,18 @@ private boolean matchTimestamp( return timestamp == null || timestamp == produceDataEx.timestamp(); } + private boolean matchProducerId( + final KafkaProduceDataExFW produceDataEx) + { + return producerId == null || producerId == produceDataEx.producerId(); + } + + private boolean matchProducerEpoch( + final KafkaProduceDataExFW produceDataEx) + { + return producerEpoch == null || producerEpoch == produceDataEx.producerEpoch(); + } + private boolean matchSequence( final KafkaProduceDataExFW produceDataEx) { @@ -3588,6 +3755,8 @@ public final class KafkaMergedFetchDataExMatcherBuilder { private Integer deferred; private Long timestamp; + private Long producerId; + private Short producerEpoch; private Long filters; private KafkaOffsetFW.Builder partitionRW; private Array32FW.Builder progressRW; @@ -3614,6 +3783,20 @@ public KafkaMergedFetchDataExMatcherBuilder timestamp( return this; } + public KafkaMergedFetchDataExMatcherBuilder producerId( + long producerId) + { + this.producerId = producerId; + return this; + } + + public KafkaMergedFetchDataExMatcherBuilder producerEpoch( + short producerEpoch) + { + this.producerEpoch = producerEpoch; + return this; + } + public KafkaMergedFetchDataExMatcherBuilder filters( long filters) { @@ -3920,6 +4103,8 @@ public final class KafkaMergedProduceDataExMatcherBuilder { private Integer deferred; private Long timestamp; + private Long producerId; + private Short producerEpoch; private Long filters; private KafkaOffsetFW.Builder partitionRW; private Array32FW.Builder progressRW; @@ -3946,6 +4131,20 @@ public KafkaMergedProduceDataExMatcherBuilder timestamp( return this; } + public KafkaMergedProduceDataExMatcherBuilder producerId( + long producerId) + { + this.producerId = producerId; + return this; + } + + public KafkaMergedProduceDataExMatcherBuilder producerEpoch( + short producerEpoch) + { + this.producerEpoch = producerEpoch; + return this; + } + public KafkaMergedProduceDataExMatcherBuilder filters( long filters) { @@ -4213,6 +4412,8 @@ private boolean match( return matchPartition(produce) && matchDeferred(produce) && matchTimestamp(produce) && + matchProducerId(produce) && + matchProducerEpoch(produce) && matchKey(produce) && matchHashKey(produce) && matchHeaders(produce); @@ -4236,6 +4437,18 @@ private boolean matchTimestamp( return timestamp == null || timestamp == mergedProduceDataEx.timestamp(); } + private boolean matchProducerId( + final KafkaMergedProduceDataExFW mergedProduceDataEx) + { + return producerId == null || producerId == mergedProduceDataEx.producerId(); + } + + private boolean matchProducerEpoch( + final KafkaMergedProduceDataExFW mergedProduceDataEx) + { + return producerEpoch == null || producerEpoch == mergedProduceDataEx.producerEpoch(); + } + private boolean matchKey( final KafkaMergedProduceDataExFW mergedProduceDataEx) { @@ -5349,7 +5562,6 @@ private boolean match( { final KafkaProduceBeginExFW produceBeginEx = beginEx.produce(); return matchTransaction(produceBeginEx) && - matchProducerId(produceBeginEx) && matchTopic(produceBeginEx) && matchPartition(produceBeginEx); } @@ -5360,12 +5572,6 @@ private boolean matchTransaction( return transaction == null || transaction.equals(produceBeginEx.transaction()); } - private boolean matchProducerId( - final KafkaProduceBeginExFW produceBeginEx) - { - 
return producerId == null || producerId == produceBeginEx.producerId(); - } - private boolean matchTopic( final KafkaProduceBeginExFW produceBeginEx) { diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index b9bc62e50c..a3c0874d9d 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -171,6 +171,7 @@ scope kafka GROUP (253), BOOTSTRAP (254), MERGED (255), + INIT_PRODUCER_ID (22), META (3), OFFSET_COMMIT (8), OFFSET_FETCH (9), @@ -185,6 +186,7 @@ scope kafka case 253: kafka::stream::KafkaGroupBeginEx group; case 254: kafka::stream::KafkaBootstrapBeginEx bootstrap; case 255: kafka::stream::KafkaMergedBeginEx merged; + case 22: kafka::stream::KafkaInitProducerIdBeginEx initProducerId; case 3: kafka::stream::KafkaMetaBeginEx meta; case 8: kafka::stream::KafkaOffsetCommitBeginEx offsetCommit; case 9: kafka::stream::KafkaOffsetFetchBeginEx offsetFetch; @@ -253,6 +255,8 @@ scope kafka { int32 deferred = 0; // INIT only (TODO: move to DATA frame) int64 timestamp = 0; // INIT only + int64 producerId = -1; // INIT only + int16 producerEpoch = -1; // INIT only KafkaOffset partition; // INIT only KafkaKey key; // INIT only KafkaKey hashKey; // INIT only @@ -275,6 +279,7 @@ scope kafka { case 252: kafka::stream::KafkaMergedConsumerFlushEx consumer; case 1: kafka::stream::KafkaMergedFetchFlushEx fetch; + case 0: kafka::stream::KafkaMergedProduceFlushEx produce; } struct KafkaMergedConsumerFlushEx @@ -292,6 +297,12 @@ scope kafka KafkaKey key; } + struct KafkaMergedProduceFlushEx + { + KafkaKey hashKey; + int32 partitionId = -1; + } + struct KafkaMetaBeginEx { string16 topic; @@ -354,7 +365,6 @@ scope kafka struct KafkaProduceBeginEx { string8 transaction; // = null; - int64 producerId = -1; string16 topic; KafkaOffset partition; } @@ -363,6 +373,8 @@ scope kafka { int32 deferred = 0; int64 timestamp = 0; + int64 producerId = -1; + int16 producerEpoch = -1; int32 sequence = -1; uint32 crc32c = 0; KafkaAckMode ackMode = IN_SYNC_REPLICAS; @@ -379,8 +391,8 @@ scope kafka { int32 partitionId = -1; int64 partitionOffset = -1; - int32 leaderEpoch; - string16 metadata; + int32 leaderEpoch = -1; + string16 metadata = null; } struct KafkaGroupTopicMetadata @@ -452,12 +464,6 @@ scope kafka int64 correlationId = -1; } - struct KafkaOffsetFetchTopic - { - string16 topic; - KafkaTopicPartition[] partitions; - } - struct KafkaOffsetFetchBeginEx { string16 groupId; @@ -474,18 +480,26 @@ scope kafka struct KafkaOffsetCommitBeginEx { - string16 topic; string16 groupId; string16 memberId; string16 instanceId; + string16 host = null; + int32 port = 0; } struct KafkaOffsetCommitDataEx { + string16 topic; KafkaOffset progress; int32 generationId; int32 leaderEpoch; } + + struct KafkaInitProducerIdBeginEx + { + int64 producerId; + int16 producerEpoch; + } } scope rebalance diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.client.options.validate.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.client.options.validate.yaml index 152899df55..2895815a17 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.client.options.validate.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.client.options.validate.yaml @@ 
-20,6 +20,7 @@ catalogs: test0: type: test options: + id: 1 schema: | { "fields": [ @@ -44,7 +45,11 @@ bindings: topics: - name: test value: - type: test + model: test + length: 13 + catalog: + test0: + - id: 1 routes: - exit: cache0 when: diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.convert.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.convert.yaml new file mode 100644 index 0000000000..bac0e616b8 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.convert.yaml @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +--- +name: test +catalogs: + test0: + type: test + options: + id: 1 + schema: | + { + "fields": [ + { + "name": "id", + "type": "string" + }, + { + "name": "status", + "type": "string" + } + ], + "name": "Event", + "namespace": "io.aklivity.example", + "type": "record" + } +bindings: + app0: + type: kafka + kind: cache_client + routes: + - exit: cache0 + when: + - topic: test + cache0: + type: kafka + kind: cache_server + options: + topics: + - name: test + value: + model: test + length: 13 + catalog: + test0: + - id: 1 + routes: + - exit: app1 + when: + - topic: test + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.validate.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.validate.yaml index 53d3f7b3f9..42192fdf2e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.validate.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/cache.options.validate.yaml @@ -20,6 +20,7 @@ catalogs: test0: type: test options: + id: 1 schema: | { "fields": [ @@ -50,10 +51,13 @@ bindings: options: topics: - name: test - key: - type: test value: - type: test + model: test + capability: read + length: 13 + catalog: + test0: + - id: 1 routes: - exit: app1 when: diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.merged.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.merged.yaml index b5de08fa5a..4987d8fd1d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.merged.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.merged.yaml @@ -17,10 +17,12 @@ --- name: test bindings: - app0: - type: kafka - kind: client - options: - merged: - - test - exit: net0 + app0: + type: kafka + kind: client + options: + servers: + - localhost:9092 + merged: + - test + exit: net0 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.plain.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.plain.yaml index 8f056ede55..a5907739a7 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.plain.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.plain.yaml @@ -17,13 +17,15 @@ --- name: test bindings: - app0: - type: kafka - kind: client - options: - sasl: - mechanism: plain - username: username - password: password - routes: - - exit: net0 + app0: + type: kafka + kind: client + options: + servers: + - localhost:9092 + sasl: + mechanism: plain + username: username + password: password + routes: + - exit: net0 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.scram.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.scram.yaml index d894f9edbc..c8b56fc364 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.scram.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.options.sasl.scram.yaml @@ -17,13 +17,15 @@ --- name: test bindings: - app0: - type: kafka - kind: client - options: - sasl: - mechanism: scram-sha-1 - username: user - password: pencil - routes: - - exit: net0 + app0: + type: kafka + kind: client + options: + servers: + - localhost:9092 + sasl: + mechanism: scram-sha-1 + username: user + password: pencil + routes: + - exit: net0 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topic.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topic.yaml index 4df038c9fe..0e6e9e267a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topic.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topic.yaml @@ -20,6 +20,9 @@ bindings: app0: type: kafka kind: client + options: + servers: + - localhost:9092 routes: - exit: net0 when: diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topics.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topics.yaml index b743ce17e7..de89f09c26 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topics.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.when.topics.yaml @@ -20,6 +20,9 @@ bindings: app0: type: kafka kind: client + options: + servers: + - localhost:9092 routes: - exit: net0 when: diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.yaml b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.yaml index cb3217d787..049d91da43 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.yaml +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/config/client.yaml @@ -17,7 +17,10 @@ --- name: test bindings: - app0: - 
type: kafka - kind: client - exit: net0 + app0: + type: kafka + kind: client +# options: +# servers: +# - localhost:9092 + exit: net0 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json index 1e4aa05c5c..9e561112d5 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json @@ -82,11 +82,11 @@ }, "key": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/converter" }, "value": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/converter" } } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt index bb7635fdba..a771dd38c1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt @@ -128,7 +128,6 @@ connect await RECEIVED_OFFSET_COMMIT write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") @@ -140,6 +139,7 @@ connected write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt index 93f0a7753e..48a34b2a83 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt @@ -128,7 +128,6 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") @@ -140,6 +139,7 @@ connected read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/client.rpt deleted file mode 100644 index 7b293ec9f5..0000000000 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/client.rpt +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. 
-# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -connect "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "half-duplex" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .meta() - .topic("test") - .build() - .build()} - -connected - -read zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .meta() - .topic("test") - .build() - .build()} - -read zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .meta() - .partition(0, 177) - .build() - .build()} - -read notify ROUTED_BROKER_CLIENT - -connect await ROUTED_BROKER_CLIENT - "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "half-duplex" - option zilla:affinity 0xb1 - option zilla:byteorder "network" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .fetch() - .topic("test") - .partition(0, 10) - .build() - .build()} - -connected - -read zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .fetch() - .topic("test") - .partition(0, 10, 10) - .build() - .build()} - -read zilla:data.ext ${kafka:matchDataEx() - .typeId(zilla:id("kafka")) - .fetch() - .partition(0, 10, 10) - .build() - .build()} -read [0x00] 0x09 ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt index a549d08744..30a9f0b563 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt index c7cce87c75..59256ffd46 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") 
.protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt index 49d7c86c06..99a434b412 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt index 85ebf4994f..ca505f7def 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt index 45761ae379..3c443409ec 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt index 24d8fa5314..d604c2be10 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index 77d1e5a543..42068b91c9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -36,7 +36,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt index 053771f954..a3f4300fbf 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt index 0681f574df..b736d8bf08 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt index 190f5d143e..4524486f31 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt 
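The same coordinator-host assertion recurs across these group scripts. Consolidated, the updated expression looks roughly as follows; the matchBeginEx()/group() prefix is assumed from the surrounding scripts, since each hunk begins mid-expression:

    read zilla:begin.ext ${kafka:matchBeginEx()
                               .typeId(zilla:id("kafka"))
                               .group()
                                   .groupId("test")
                                   .protocol("highlander")
                                   .instanceId("zilla")
                                   .host("broker1.example.com")
                                   .port(9092)
                                   .timeout(30000)
                                   .build()
                               .build()}

Asserting broker1.example.com rather than localhost presumably lets the scripts verify that the coordinator host advertised by the broker, not the bootstrap host, is what gets propagated.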
@@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt index 7b80d69206..e2a512b722 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() @@ -82,7 +82,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt index 45a9295835..27bbc1df5f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() @@ -83,7 +83,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt index f7e8a73b5f..0c0e0a71c2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt index 37853f30cd..f8349399e2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt index 02b8d2ee7f..dcd115d304 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt @@ -43,7 +43,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(45000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt index 9d4a24ccf4..d9ce648278 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt @@ -54,7 +54,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt index 9d0671ff4d..8797e6d003 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt index ab1377d3cc..cde6a3a29f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt index bb64576edb..ea74b0d94f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt index bd287c531f..24e7bedf28 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt index 12932685e8..2a9dc91ce2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("unknown") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt index 797cd3814a..6df8cfb20e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("unknown") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt index bed3ada860..bf33f09564 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt @@ -35,7 +35,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt index a6a033eee2..65d9983e4c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/client.rpt index c2617b9d27..b5e81d4f31 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/client.rpt @@ -35,7 +35,7 
@@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/server.rpt index b1d23cd559..53495a59c9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/server.sent.read.abort.after.join.group/server.rpt @@ -39,7 +39,7 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("test") .protocol("highlander") .instanceId("zilla") - .host("localhost") + .host("broker1.example.com") .port(9092) .timeout(30000) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/client.rpt new file mode 100644 index 0000000000..11b2fe8dc0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(-1) + .producerEpoch(-1) + .build() + .build()} +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(2) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/server.rpt new file mode 100644 index 0000000000..8b4631548b --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id/produce.new.id/server.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
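
The new `init.producer.id/produce.new.id` scripts model the Kafka InitProducerId handshake: the client opens the stream with `producerId(-1)` and `producerEpoch(-1)` to ask the broker to allocate a fresh idempotent-producer identity, and the broker answers with concrete values (here 1 and 2). A minimal sketch of that request/response shape, using plain Java records as stand-ins for Zilla's generated flyweight types (an assumption for illustration only):

```java
// Minimal model of the InitProducerId handshake exercised by produce.new.id;
// field names follow the Kafka protocol, not Zilla internals (assumed).
public final class InitProducerIdExample {

    record InitProducerIdRequest(long producerId, short producerEpoch) {
        // -1/-1 asks the broker to allocate a brand-new producer id and epoch.
        static InitProducerIdRequest forNewProducer() {
            return new InitProducerIdRequest(-1L, (short) -1);
        }
    }

    record InitProducerIdResponse(long producerId, short producerEpoch) {}

    public static void main(String[] args) {
        InitProducerIdRequest request = InitProducerIdRequest.forNewProducer();
        // The scripts expect the broker to answer with concrete values (1, 2).
        InitProducerIdResponse response = new InitProducerIdResponse(1L, (short) 2);
        System.out.printf("requested (%d, %d) -> allocated (%d, %d)%n",
            request.producerId(), request.producerEpoch(),
            response.producerId(), response.producerEpoch());
    }
}
```
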
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(-1) + .producerEpoch(-1) + .build() + .build()} + +connected + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(2) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/client.rpt new file mode 100644 index 0000000000..d5c72d43c2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/client.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 16 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/server.rpt new file mode 100644 index 0000000000..2845c92004 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.convert/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
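
The `merged.fetch.message.value.convert` scripts frame the payload as `${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive"`, i.e. fields behind zigzag varint length prefixes, the same encoding Kafka uses for key and value lengths inside record batches. A self-contained sketch of that encoding, independent of Zilla:

```java
import java.io.ByteArrayOutputStream;

// Zigzag varint encoding as used for length prefixes in Kafka record batches;
// the scripts' ${kafka:varint(3)} "id0" is a 3-byte field behind such a prefix.
public final class VarintExample {

    static void writeVarint(int value, ByteArrayOutputStream out) {
        int v = (value << 1) ^ (value >> 31); // zigzag: fold the sign into bit 0
        while ((v & ~0x7F) != 0) {
            out.write((v & 0x7F) | 0x80);     // low 7 bits, continuation flag set
            v >>>= 7;
        }
        out.write(v);                         // final byte, continuation flag clear
    }

    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeVarint(3, out);                  // prefix for "id0"
        out.writeBytes("id0".getBytes());
        writeVarint(8, out);                  // prefix for "positive"
        out.writeBytes("positive".getBytes());
        System.out.println(out.size() + " bytes framed"); // 1 + 3 + 1 + 8 = 13
    }
}
```
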
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/client.rpt new file mode 100644 index 0000000000..47049c4a59 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/client.rpt @@ -0,0 +1,30 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 16 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/server.rpt new file mode 100644 index 0000000000..1f5bddd838 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.invalid/server.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/client.rpt new file mode 100644 index 0000000000..d5c72d43c2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/client.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 16 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/server.rpt new file mode 100644 index 0000000000..2845c92004 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.value.valid/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
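
Note the asymmetry in the `merged.fetch.message.value.invalid` pair: both sides connect, but the client never reads a data frame, so a value that fails validation is expected to be dropped rather than delivered. A sketch of that gate, where `Predicate<byte[]>` is a hypothetical stand-in for Zilla's model validation (not shown in this diff):

```java
import java.util.Optional;
import java.util.function.Predicate;

// Sketch of the drop-on-invalid behavior the *.invalid scripts assert:
// a fetched value is only forwarded when the configured model accepts it.
public final class FetchValidationExample {

    static Optional<byte[]> forwardIfValid(byte[] value, Predicate<byte[]> validator) {
        return validator.test(value) ? Optional.of(value) : Optional.empty();
    }

    public static void main(String[] args) {
        Predicate<byte[]> asciiOnly = v -> {
            for (byte b : v) if (b < 0x20 || b > 0x7E) return false;
            return true;
        };
        System.out.println(forwardIfValid("positive".getBytes(), asciiOnly).isPresent()); // true
        System.out.println(forwardIfValid(new byte[] {(byte) 0xc6}, asciiOnly).isPresent()); // false
    }
}
```
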
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .partition(0, 1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt index c7de6e3bfc..b4e579768d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt @@ -31,6 +31,16 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .partition(0, -1) + .partition(1, -1) + .build() + .build()} + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt index 7ef71f8311..097224b9e0 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt @@ -36,6 +36,17 @@ read zilla:begin.ext ${kafka:beginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .partition(0, -1) + .partition(1, -1) + .build() + .build()} +write flush + read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/client.rpt similarity index 50% rename from specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/client.rpt rename to specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/client.rpt index 87d7a9a0ed..dbb2c9f467 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/client.rpt @@ -20,57 +20,53 @@ connect "zilla://streams/app0" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .meta() + .merged() + .capabilities("PRODUCE_ONLY") .topic("test") + .ackMode("LEADER_ONLY") .build() .build()} connected -read zilla:begin.ext ${kafka:beginEx() +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .partition(0, -1) + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) - .meta() - .topic("test") + .merged() + .produce() + .hashKey("key7") .build() .build()} -read zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .meta() - .partition(0, 177) - .build() - .build()} - -read notify ROUTED_BROKER_CLIENT - -connect await ROUTED_BROKER_CLIENT - "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "half-duplex" - option zilla:affinity 0xb1 - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .fetch() - .topic("test") - .partition(0, 10) - .build() - .build()} +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .partitionId(0) + .build() + .build()} -connected -read zilla:begin.ext ${kafka:beginEx() +write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) - .fetch() - .topic("test") - .partition(0, 10, 10) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(0, 0) + .key("a") + .hashKey("key7") .build() .build()} +write "Hello, world #A1" +write flush -read zilla:data.ext ${kafka:matchDataEx() - .typeId(zilla:id("kafka")) - .fetch() - .partition(0, 10, 10) - .build() - .build()} -read [0xc6] diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/server.rpt new file mode 100644 index 0000000000..58abdd81f4 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.value.partition.id/server.rpt @@ -0,0 +1,75 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .partition(0, -1) + .build() + .build()} +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("key7") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .partitionId(0) + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(0, 0) + .key("a") + .hashKey("key7") + .build() + .build()} +read "Hello, world #A1" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/client.rpt new file mode 100644 index 0000000000..0d31cddaf3 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/client.rpt @@ -0,0 +1,111 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
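
The `merged.produce.message.value.partition.id` scripts introduce a flush handshake: the producer advises a flush carrying `hashKey("key7")`, and the binding answers with the resolved `partitionId(0)` before any data is written. A sketch of stable key-hash partition selection; Kafka's default partitioner hashes with murmur2, and the simpler hash below is an assumption made purely for brevity, not Zilla's actual code:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Stable key-to-partition resolution, as in the hashKey -> partitionId
// flush exchange above. Arrays.hashCode stands in for murmur2 here.
public final class PartitionSelectExample {

    static int selectPartition(byte[] hashKey, int partitionCount) {
        int hash = Arrays.hashCode(hashKey) & 0x7fffffff; // force non-negative
        return hash % partitionCount;
    }

    public static void main(String[] args) {
        byte[] key = "key7".getBytes(StandardCharsets.UTF_8);
        // The same key always resolves to the same partition for a fixed count.
        System.out.println(selectPartition(key, 1)); // always 0 with one partition
    }
}
```
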
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(0, 1) + .build() + .build()} +write "Hello, world #A1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(1, 1) + .build() + .build()} +write "Hello, world #B1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(0, 2) + .build() + .build()} +write "Hello, world #A2" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(1, 2) + .build() + .build()} +write "Hello, world #B2" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(2, 1) + .build() + .build()} +write "Hello, world #C1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .partition(2, 2) + .build() + .build()} +write "Hello, world #C2" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/server.rpt new file mode 100644 index 0000000000..a790cc43fe --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.producer.id/server.rpt @@ -0,0 +1,98 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 16 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(0, 1) + .build() + .build()} +read "Hello, world #A1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(1, 1) + .build() + .build()} +read "Hello, world #B1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(0, 2) + .build() + .build()} +read "Hello, world #A2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(1, 2) + .build() + .build()} +read "Hello, world #B2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(2, 1) + .build() + .build()} +read "Hello, world #C1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .producerId(1) + .producerEpoch(1) + .partition(2, 2) + .build() + .build()} +read "Hello, world #C2" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/client.rpt new file mode 100644 index 0000000000..4181bd09dd --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/client.rpt @@ -0,0 +1,136 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
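
`merged.produce.message.values.producer.id` interleaves writes across partitions 0, 1, and 2 under a fixed `producerId(1)`/`producerEpoch(1)`, with the per-partition position advancing monotonically (A1/A2 on 0, B1/B2 on 1, C1/C2 on 2). That forward-only per-partition bookkeeping is the invariant idempotent produce relies on; a minimal illustrative sketch:

```java
import java.util.HashMap;
import java.util.Map;

// Per-partition monotonic sequence bookkeeping, as an idempotent producer
// (fixed producerId/producerEpoch) must maintain. Illustrative only.
public final class ProducerSequenceExample {

    private final Map<Integer, Integer> nextByPartition = new HashMap<>();

    int nextSequence(int partitionId) {
        // returns 0, 1, 2, ... independently for each partition
        return nextByPartition.merge(partitionId, 1, Integer::sum) - 1;
    }

    public static void main(String[] args) {
        ProducerSequenceExample producer = new ProducerSequenceExample();
        System.out.println(producer.nextSequence(0)); // 0  (A1)
        System.out.println(producer.nextSequence(1)); // 0  (B1)
        System.out.println(producer.nextSequence(0)); // 1  (A2)
        System.out.println(producer.nextSequence(2)); // 0  (C1)
    }
}
```
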
+# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/server.rpt new file mode 100644 index 0000000000..e5ee0f307f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.convert/server.rpt @@ -0,0 +1,139 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/client.rpt new file mode 100644 index 0000000000..0800d537c5 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/client.rpt @@ -0,0 +1,136 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + 
.fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read ${kafka:varint(3)} "id0" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/server.rpt new file mode 100644 index 0000000000..70e460c1d2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.invalid/server.rpt @@ -0,0 +1,139 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} 
+write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write ${kafka:varint(3)} "id0" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/client.rpt new file mode 100644 index 0000000000..4181bd09dd --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/client.rpt @@ -0,0 +1,136 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + 
.meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/server.rpt new file mode 100644 index 0000000000..e5ee0f307f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.fetch.message.value.valid/server.rpt @@ -0,0 +1,139 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
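
The unmerged fetch scripts request `.partition(0, -2)` and are answered with `.partition(0, 1, 2)`: -2 is Kafka's sentinel for the earliest available offset (-1 means latest), and the reply substitutes the resolved concrete offsets. As a sketch:

```java
// Kafka's ListOffsets sentinels: -2 resolves to the earliest available offset,
// -1 to the latest; concrete offsets pass through unchanged. Sketch only.
public final class OffsetSentinelExample {

    static final long EARLIEST = -2L;
    static final long LATEST = -1L;

    static long resolve(long requested, long logStartOffset, long highWatermark) {
        if (requested == EARLIEST) return logStartOffset;
        if (requested == LATEST) return highWatermark;
        return requested;
    }

    public static void main(String[] args) {
        // With log start 1 and high watermark 2, -2 resolves to 1 as in the scripts.
        System.out.println(resolve(-2L, 1L, 2L)); // 1
        System.out.println(resolve(-1L, 1L, 2L)); // 2
    }
}
```
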
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt index 5bf4738102..47cda8d305 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt @@ -255,7 +255,6 @@ connect await RECEIVED_MESSAGE write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") 
.memberId("memberId-1") .instanceId("zilla") @@ -267,6 +266,7 @@ connected write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 3, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt index 07c865357d..82b57608e9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt @@ -244,7 +244,6 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") @@ -256,6 +255,7 @@ connected read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 3, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/client.rpt new file mode 100644 index 0000000000..1c777a55bc --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/client.rpt @@ -0,0 +1,143 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +read notify PARTITION_COUNT_1 + +connect await PARTITION_COUNT_1 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(0) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +write "Hello, world #A1" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/server.rpt new file mode 100644 index 0000000000..61d830eabd --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.value.partition.id/server.rpt @@ -0,0 +1,144 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} +property padding 0 + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} +write flush + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(0) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +read "Hello, world #A1" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/client.rpt new file mode 100644 index 0000000000..67ba1546ee --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/client.rpt @@ -0,0 +1,257 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .partition(2, 3) + .build() + .build()} +read notify PARTITION_COUNT_3 + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + 
+read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #A1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #A2" +write flush + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 2 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #B1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #B2" +write flush + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 3 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #C1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +write "Hello, world #C2" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/server.rpt new file mode 100644 index 0000000000..84238ff270 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.producer.id/server.rpt @@ -0,0 +1,245 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .partition(2, 3) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #A1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #A2" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +connected
+ +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #B1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #B2" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #C1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .ackMode("LEADER_ONLY") + .build() + .build()} +read "Hello, world #C2" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/client.rpt index 1da69b696e..d6ef7c6b7f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/client.rpt @@ -21,10 +21,11 @@ connect "zilla://streams/app0" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -33,6 +34,7 @@ connected write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/server.rpt index 456c7e951d..eafe8e93f1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offset/server.rpt @@ -25,10 +25,11 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -37,6 +38,7 @@ connected 
read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/client.rpt index 923b5bf5f7..8750246e4c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/client.rpt @@ -21,10 +21,11 @@ connect "zilla://streams/app0" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -33,6 +34,7 @@ connected write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) @@ -45,6 +47,7 @@ write flush write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 3, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/server.rpt index 2875b62c68..faa45f62a5 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.topic.partition.offsets/server.rpt @@ -25,10 +25,11 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -37,6 +38,7 @@ connected read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) @@ -48,6 +50,7 @@ read zilla:data.empty read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 3, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/client.rpt index 418daab13c..75be0c8918 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/client.rpt @@ 
-21,10 +21,11 @@ connect "zilla://streams/app0" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -33,6 +34,7 @@ connected write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/server.rpt index f4a2ce42c0..2c2f1e700d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/update.unknown.topic.partition.offset/server.rpt @@ -25,10 +25,11 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetCommit() - .topic("test") .groupId("client-1") .memberId("memberId-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build()} @@ -37,6 +38,7 @@ connected read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .offsetCommit() + .topic("test") .progress(0, 2, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt index 726e94ed6d..55cbb8e74d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt index a750c62abc..d145758f81 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt @@ -26,7 +26,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt index 9f8784881c..f89273e77f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt index ee6d437f1e..09dbd47ddc 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt @@ -26,7 +26,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt index 7bb6c2991f..0b2db6310d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt index 672d3870e4..dd075140b6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt @@ -26,7 +26,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .offsetFetch() .groupId("client-1") - .host("localhost") + .host("broker1.example.com") .port(9092) .topic("test") .partition(0) diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/client.rpt similarity index 75% rename from specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/client.rpt rename to specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/client.rpt index 86e24097e4..483b0570ce 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/client.rpt @@ -14,6 +14,9 @@ # under the License. # +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "half-duplex" @@ -48,13 +51,12 @@ connect await ROUTED_BROKER_CLIENT option zilla:window 8192 option zilla:transmission "half-duplex" option zilla:affinity 0xb1 - option zilla:byteorder "network" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10) + .partition(0) .build() .build()} @@ -62,17 +64,20 @@ connected read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10, 10) + .partition(0) .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() - .typeId(zilla:id("kafka")) - .fetch() - .partition(0, 10, 10) - .build() - .build()} - -read [0x00] 0x09 ${kafka:varint(3)} "id0" +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(8) + .producerEpoch(1) + .sequence(0) + .build() + .build()} +write "Hello, world" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/server.rpt similarity index 76% rename from specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/server.rpt rename to specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/server.rpt index 122816eb6f..2338b82799 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.string.invalid/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.producer.id/server.rpt @@ -14,9 +14,6 @@ # under the License. 
# -property deltaMillis 0L -property newTimestamp ${kafka:timestamp() + deltaMillis} - property serverAddress "zilla://streams/app0" accept ${serverAddress} @@ -54,9 +51,9 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10) + .partition(0) .build() .build()} @@ -64,19 +61,18 @@ connected write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10, 10) + .partition(0) .build() .build()} -write flush -write zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .fetch() - .timestamp(newTimestamp) - .partition(0, 10, 10) - .build() - .build()} -write [0xc6] -write flush +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(8) + .producerEpoch(1) + .sequence(0) + .build() + .build()} +read "Hello, world" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.value.repeated/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.value.repeated/client.rpt index 980c49a933..ce3393ea3b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.value.repeated/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.value.repeated/client.rpt @@ -74,6 +74,7 @@ write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .produce() .timestamp(newTimestamp) + .sequence(0) .build() .build()} write "Hello, world" @@ -83,6 +84,7 @@ write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .produce() .timestamp(newTimestamp) + .sequence(1) .build() .build()} write "Hello, world" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/client.rpt new file mode 100644 index 0000000000..4b980edead --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/client.rpt @@ -0,0 +1,97 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 177) + .build() + .build()} + +read notify ROUTED_BROKER_CLIENT + +connect await ROUTED_BROKER_CLIENT + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 0xb1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(0) + .header("header1", "value1") + .build() + .build()} +write "Hello, world" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(2) + .producerEpoch(2) + .sequence(0) + .header("header1", "value1") + .build() + .build()} +write "Hello, again" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/server.rpt new file mode 100644 index 0000000000..dd64a9c5d6 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.changes/server.rpt @@ -0,0 +1,93 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:padding 512 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 177) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(1) + .producerEpoch(1) + .sequence(0) + .header("header1", "value1") + .build() + .build()} + +read "Hello, world" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(2) + .producerEpoch(2) + .sequence(0) + .header("header1", "value1") + .build() + .build()} + +read "Hello, again" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/client.rpt new file mode 100644 index 0000000000..d6d7147ec0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/client.rpt @@ -0,0 +1,95 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 177) + .build() + .build()} + +read notify ROUTED_BROKER_CLIENT + +connect await ROUTED_BROKER_CLIENT + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 0xb1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(2) + .build() + .build()} +write "Hello, world" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .build() + .build()} +write "Hello, again" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/server.rpt similarity index 68% rename from specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/server.rpt rename to specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/server.rpt index 9b0ce53e9e..3d5d3309ba 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.invalid/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id.replay/server.rpt @@ -14,15 +14,12 @@ # under the License. 
# -property deltaMillis 0L -property newTimestamp ${kafka:timestamp() + deltaMillis} - property serverAddress "zilla://streams/app0" accept ${serverAddress} option zilla:window 8192 + option zilla:padding 512 option zilla:transmission "half-duplex" - option zilla:byteorder "network" accepted @@ -55,9 +52,9 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10) + .partition(0) .build() .build()} @@ -65,19 +62,30 @@ connected write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10, 10) + .partition(0) .build() .build()} -write flush -write zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .fetch() - .timestamp(newTimestamp) - .partition(0, 10, 10) - .build() - .build()} -write [0x00] 0x09 ${kafka:varint(3)} "id0" -write flush +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(1) + .producerEpoch(1) + .sequence(2) + .build() + .build()} + +read "Hello, world" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(1) + .producerEpoch(1) + .sequence(1) + .build() + .build()} + +read "Hello, again" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/client.rpt new file mode 100644 index 0000000000..042e1058a1 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/client.rpt @@ -0,0 +1,95 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 177) + .build() + .build()} + +read notify ROUTED_BROKER_CLIENT + +connect await ROUTED_BROKER_CLIENT + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 0xb1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(0) + .build() + .build()} +write "Hello, world" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .producerId(1) + .producerEpoch(1) + .sequence(1) + .build() + .build()} +write "Hello, again" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/server.rpt similarity index 68% rename from specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/server.rpt rename to specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/server.rpt index 6745fd21e5..073007b705 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/fetch/message.value.valid/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/produce/message.values.producer.id/server.rpt @@ -14,15 +14,12 @@ # under the License. 
# -property deltaMillis 0L -property newTimestamp ${kafka:timestamp() + deltaMillis} - property serverAddress "zilla://streams/app0" accept ${serverAddress} option zilla:window 8192 + option zilla:padding 512 option zilla:transmission "half-duplex" - option zilla:byteorder "network" accepted @@ -55,9 +52,9 @@ accepted read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10) + .partition(0) .build() .build()} @@ -65,19 +62,30 @@ connected write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .fetch() + .produce() .topic("test") - .partition(0, 10, 10) + .partition(0) .build() .build()} -write flush -write zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .fetch() - .timestamp(newTimestamp) - .partition(0, 10, 10) - .build() - .build()} -write [0x00] 0x09 ${kafka:varint(3)} "id0" ${kafka:varint(8)} "positive" -write flush +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(1) + .producerEpoch(1) + .sequence(0) + .build() + .build()} + +read "Hello, world" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .producerId(1) + .producerEpoch(1) + .sequence(1) + .build() + .build()} + +read "Hello, again" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/client.rpt index da63c8b7e1..065c0c1ff3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/client.rpt @@ -36,15 +36,28 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +write abort +read abort + +read notify ROUTED_CLUSTER_SERVER + +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -77,19 +90,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -write abort -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/server.rpt index b40e2bfa84..c124069d3f 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.read.abort.after.sync.group.response/server.rpt @@ -32,14 +32,21 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +read aborted +write aborted + +accepted + +connected read 87 # size 32s # describe configs @@ -73,13 +80,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -read aborted -write aborted - -accepted - -connected - read 82 # size 11s # join group 5s # v5 @@ -96,7 +96,6 @@ read 82 # size 14 # metadata size [0..14] # metadata - write 34 # size ${newRequestId} 0 # throttle time diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/client.rpt index fe48f35d1c..0cf2e01601 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/client.rpt @@ -37,15 +37,28 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +write abort +read abort + +read notify ROUTED_CLUSTER_SERVER + +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -78,19 +91,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -write abort -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/server.rpt index eb0e6cb9a0..1c5ebede71 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.after.sync.group.response/server.rpt @@ -33,14 +33,21 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +read aborted +write aborted + +accepted + +connected read 87 # size 32s # describe configs @@ -74,13 +81,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -read aborted -write aborted - -accepted - -connected - read 82 # size 11s # join group 5s # v5 @@ -97,7 +97,6 @@ read 82 # size 14 # metadata size [0..14] # metadata - write 34 # size ${newRequestId} 0 # throttle time diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt index dacc0807d6..fe93036897 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt @@ -36,13 +36,13 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port write abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt index b1566379e8..1ea060a443 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt @@ -31,13 +31,13 @@ read 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port read aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index 08661fc2cc..e779b4fcdb 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -54,15 +54,25 @@ write 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -95,16 +105,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index 1bf54e54de..80a2db1cfb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -50,15 +50,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -91,10 +95,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index 91f78fc199..513dfcc3de 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -37,48 +37,15 @@ write 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port - -write 87 # size - 32s # describe configs - 0s # v0 - 
${newRequestId} - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -read 103 # size - (int:newRequestId) - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - write 22 # size 10s # find coordinator 1s # v1 @@ -87,15 +54,25 @@ write 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -128,16 +105,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 @@ -164,6 +131,38 @@ read 24 # size 0s # not a coordinator for a consumer 0 # members +write 87 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index c893a46fdc..28ca49fac2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -33,48 +33,15 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port -read 87 # size - 32s # describe configs - 0s # v0 - 
(int:requestId) - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -write 103 # size - ${requestId} - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - - read 22 # size 10s # find coordinator 1s # v1 @@ -83,15 +50,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected read 87 # size 32s # describe configs @@ -125,10 +95,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 @@ -155,6 +121,38 @@ write 24 # size 0s # not a coordinator for a consumer 0 # members +read 87 # size + 32s # describe configs + 0s # v0 + (int:requestId) + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/group.authorization.failed/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/group.authorization.failed/server.rpt index 0740c4badf..41568036d4 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/group.authorization.failed/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/group.authorization.failed/server.rpt @@ -14,8 +14,6 @@ # under the License. 
# - - accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index dab341cf4f..f4b3290705 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -37,15 +37,25 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_CLUSTER_SERVER + +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index a0cd9134d4..720ab5f789 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -33,14 +33,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +accepted + +connected read 87 # size 32s # describe configs @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/client.rpt index 52f761612d..714fcc7a0a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/client.rpt @@ -54,15 +54,25 @@ write 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/server.rpt index 89f6753d3a..a8de4fea29 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.describe.config/server.rpt @@ -50,15 +50,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/client.rpt index b8c4ca1662..434daa389f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/client.rpt @@ -54,15 +54,25 @@ write 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -85,16 +95,6 @@ read 24 # size 1s "0" # "0" nodeId 0 # configs -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/server.rpt index e2f1393ede..f9870b5e41 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/invalid.session.timeout/server.rpt @@ -50,15 +50,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -81,11 +85,6 @@ write 24 # size 1s "0" # "0" nodeId 0 # configs - -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/client.rpt index 105aafc599..a8bacbd9cb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/client.rpt @@ -37,47 +37,15 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port -write 87 # size - 32s # describe configs - 0s # v0 - ${newRequestId} - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -read 103 # size - (int:newRequestId) - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - write 22 # size 10s # find coordinator 1s # v1 @@ -86,15 +54,25 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 
32s # describe configs 0s # v0 @@ -127,16 +105,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 @@ -234,6 +202,38 @@ read 35 # size 5s "zilla" # group instance id 0s # no error +write 87 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/server.rpt index 1b5fa5c8de..e890130367 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.multiple.members.with.same.group.id/server.rpt @@ -33,47 +33,15 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port -read 87 # size - 32s # describe configs - 0s # v0 - (int:requestId) - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -write 103 # size - ${requestId} - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - read 22 # size 10s # find coordinator 1s # v1 @@ -82,15 +50,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -123,11 +95,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive - -accepted - -connected - read 82 # size 11s # join group 5s # v5 @@ -228,6 
+195,38 @@ write 35 # size #Second try +read 87 # size + 32s # describe configs + 0s # v0 + (int:requestId) + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt index d9bed7db1f..b92ce35508 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt @@ -37,15 +37,25 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt index 5dbb989b5e..7620db0fd6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt @@ -33,15 +33,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s 
"localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt index e66eac3d3c..f02014b3f1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt @@ -37,15 +37,24 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port +read notify ROUTED_DESCRIBE_SERVER_FIRST + +connect await ROUTED_DESCRIBE_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected write 87 # size 32s # describe configs @@ -79,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER_FIRST - -connect await ROUTED_DESCRIBE_SERVER_FIRST - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt index 45520ab4b1..c956785c2a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt @@ -33,15 +33,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port +accepted + +connected read 87 # size 32s # describe configs @@ -75,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 0f3f08f7be..b26007961b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -37,14 +37,24 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +read notify ROUTED_DESCRIBE_SERVER_FIRST + +connect await ROUTED_DESCRIBE_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected write 87 # size 32s # describe configs @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER_FIRST - -connect await ROUTED_DESCRIBE_SERVER_FIRST - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index 373d77393e..7d3756cbc8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -33,14 +33,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +accepted + +connected read 87 # size 32s # describe configs @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index d89a2759fe..95591accd8 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -37,14 +37,24 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected write 87 # size 32s # describe configs @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index 15e72f9dc9..9fb37b30ae 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -33,14 +33,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +accepted + +connected read 87 # size 32s # describe configs @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index f90e0113ae..900b00303e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -37,15 +37,25 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s 
"localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index 2777024c75..2c6ef270dc 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -33,15 +33,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index cbd71efee6..138951c55e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -37,15 +37,24 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 #port + 19s "broker1.example.com" # host + 9092 # port +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected write 87 # size 32s # describe configs @@ -79,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 79 # size 11s # join group 5s # v5 
@@ -105,7 +104,6 @@ write 79 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata - read 88 # size (int:newRequestId) 0 # throttle time @@ -141,4 +139,3 @@ read 14 # size 0 # throttle time 0s # no error 0 # assignment - diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index f3bbbb368c..59adfb243b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -14,8 +14,6 @@ # under the License. # - - accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" @@ -33,15 +31,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port +accepted + +connected read 87 # size 32s # describe configs @@ -75,10 +76,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 79 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 7535c1daf9..965005761e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -37,14 +37,24 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected write 87 # size 32s # describe configs @@ -78,16 +88,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index b4c8d45ad1..1a0837f257 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -33,14 +33,18 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host - 9092 # port + 19s "broker1.example.com" # host + 9092 # port + +accepted + +connected read 87 # size 32s # describe configs @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/client.rpt index db6fd4b680..ac97aaf8fb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/client.rpt @@ -37,15 +37,25 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +read notify ROUTED_CLUSTER_SERVER + +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + write 87 # size 32s # describe configs 0s # v0 @@ -53,7 +63,7 @@ write 87 # size 5s "zilla" # no client id 1 # resources [0x04] # broker resource - 1s "0" # "node" topic + 1s "0" # "node" 2 # configs 28s "group.min.session.timeout.ms" # name 28s "group.max.session.timeout.ms" # name @@ -78,17 +88,7 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - -write 219 # size +write 219 # size 11s # join group 5s # v5 ${newRequestId} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/server.rpt index f7c1da64f6..4b0ebba79c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/topics.partition.assignment/server.rpt @@ -33,15 +33,19 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 
45 # size ${newRequestId} 0 # throttle time 0s # no error 4s "none" # error message none 0 # coordinator node - 9s "localhost" # host + 19s "broker1.example.com" # host 9092 # port +accepted + +connected + read 87 # size 32s # describe configs 0s # v0 @@ -49,7 +53,7 @@ read 87 # size 5s "zilla" # no client id 1 # resources [0x04] # broker resource - 1s "0" # "node" topic + 1s "0" # "node" 2 # configs 28s "group.min.session.timeout.ms" # name 28s "group.max.session.timeout.ms" # name @@ -74,10 +78,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - read 219 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/client.rpt index 5975ed5225..09eb809174 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/client.rpt @@ -66,46 +66,14 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) - 0 #throttle time - 0s #no error - 4s "none" #error message none - 0 #coordinator node - 9s "localhost" #host - 9092 #port - -write 87 # size - 32s # describe configs - 0s # v0 - ${newRequestId} - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -read 103 # size - (int:newRequestId) - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 19s "broker1.example.com" # host + 9092 # port read notify ROUTED_DESCRIBE_SERVER @@ -146,6 +114,38 @@ read 20 # size -1s # authentication bytes 0L # session lifetime +write 87 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + write 82 # size 11s # join group 5s # v5 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/server.rpt index 7c58d9c255..920e6b434a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.plain/server.rpt @@ -62,47 +62,14 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} - 0 #throttle time - 0s #no error - 4s "none" #error message none - 0 #coordinator node - 9s "localhost" #host - 9092 #port - -read 87 # size - 32s # describe configs - 0s # v0 - (int:requestId) - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -write 103 # size - ${requestId} - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 19s "broker1.example.com" # host + 9092 # port accepted @@ -137,6 +104,38 @@ write 20 # size -1s # authentication bytes 0L # session lifetime +read 87 # size + 32s # describe configs + 0s # v0 + (int:requestId) + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + read 82 # size 11s # join group 5s # v5 @@ -153,7 +152,6 @@ read 82 # size 14 # metadata size [0..14] # metadata - write 91 # size ${newRequestId} 0 # throttle time diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/client.rpt index 2ed64ceece..aeab98ddfb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/client.rpt @@ -80,46 +80,14 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type -read 35 # size +read 45 # size (int:newRequestId) - 0 #throttle time - 0s #no error - 4s "none" #error message none - 0 #coordinator node - 9s "localhost" #host - 9092 #port - -write 87 # size - 32s # describe configs - 0s # v0 - ${newRequestId} - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -read 103 # size - (int:newRequestId) - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 19s "broker1.example.com" # host + 9092 # port read notify ROUTED_DESCRIBE_SERVER @@ -174,6 +142,38 @@ read 50 # size 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" 0L # session lifetime +write 87 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + write 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/server.rpt index 92193b4f11..b0470f55f1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader.assignment.with.sasl.scram/server.rpt @@ -76,47 +76,14 @@ read 22 # size 4s "test" # "test" coordinator key [0x00] # coordinator group type -write 35 # size +write 45 # size ${newRequestId} - 0 #throttle time - 0s #no error - 4s "none" #error message none - 0 #coordinator node - 9s "localhost" #host - 9092 #port - -read 87 # size - 32s # describe configs - 0s # v0 - (int:requestId) - 5s "zilla" # client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -write 103 # size - ${requestId} - 0 - 1 # 
resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 19s "broker1.example.com" # host + 9092 # port accepted @@ -165,6 +132,38 @@ write 50 # size 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes 0L # session lifetime +read 87 # size + 32s # describe configs + 0s # v0 + (int:requestId) + 5s "zilla" # client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + read 82 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/client.rpt new file mode 100644 index 0000000000..7db462fc04 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/client.rpt @@ -0,0 +1,76 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
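+#
+# Client-side script: completes a SASL PLAIN handshake
+# (sasl.handshake v1, then sasl.authenticate v1) and issues an
+# init producer id request, expecting the broker to assign a
+# new producer id (1L) and producer epoch (2s).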
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 5s "PLAIN" # mechanism + +read 17 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +write 37 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +read 20 # size + ${newRequestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +write 31 # size + 22s # init producer id + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + + +read 20 # size + (int:newRequestId) + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/server.rpt new file mode 100644 index 0000000000..386cf6820d --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.plain/server.rpt @@ -0,0 +1,72 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
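+#
+# Server-side mirror of produce.new.id.sasl.plain/client.rpt:
+# accepts the SASL PLAIN handshake and authentication, then
+# answers the init producer id request with producer id 1L and
+# producer epoch 2s.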
+# + +property networkAcceptWindow 8192 + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + 5s "zilla" # client id + 5s "PLAIN" # mechanism + +write 17 # size + ${requestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +read 37 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + 5s "zilla" # client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +write 20 # size + ${requestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +read 31 # size + 22s # init producer id + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + +write 20 # size + ${newRequestId} + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/client.rpt new file mode 100644 index 0000000000..fd58b8e112 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/client.rpt @@ -0,0 +1,90 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
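+#
+# Client-side script: performs a SCRAM-SHA-1 exchange
+# (client-first, server-first, client-final, server-final
+# messages) before issuing the init producer id request and
+# reading back the newly assigned producer id and epoch.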
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 28 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 11s "SCRAM-SHA-1" # mechanism + +read 23 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 11s "SCRAM-SHA-1" # SCRAM + +write 55 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 36 # authentication bytes + "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL" + +read 92 # size + ${newRequestId} + 0s # no error + -1s + 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" + 0L # session lifetime + +write 101 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 82 # authentication bytes + "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=" + +read 52 # size + ${newRequestId} + 0s # no error + -1s + 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" + 0L # session lifetime + +write 31 # size + 22s # init producer id + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + + +read 20 # size + (int:newRequestId) + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/server.rpt new file mode 100644 index 0000000000..3cd927885b --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1/produce.new.id.sasl.scram/server.rpt @@ -0,0 +1,86 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
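+#
+# Server-side mirror of produce.new.id.sasl.scram/client.rpt:
+# verifies each SCRAM-SHA-1 message in turn, then completes the
+# init producer id request with producer id 1L and epoch 2s.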
+# + +property networkAcceptWindow 8192 + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 28 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + 5s "zilla" # client id + 11s "SCRAM-SHA-1" # mechanism + +write 23 # size + ${requestId} + 0s # no error + 1 # mechanisms + 11s "SCRAM-SHA-1" # SCRAM + +read 55 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + 5s "zilla" # client id + 36 # authentication bytes + "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL" + +write 92 # size + ${requestId} + 0s # no error + -1s + 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" # authentication bytes + 0L # session lifetime + +read 101 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + 5s "zilla" # client id + 82 # authentication bytes + "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=" + +write 52 # size + ${requestId} + 0s # no error + -1s + 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes + 0L # session lifetime + +read 31 # size + 22s # init producer id + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + +write 20 # size + ${newRequestId} + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/client.rpt new file mode 100644 index 0000000000..595654cba8 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/client.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
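+#
+# Client-side script for the no-SASL variant: connects and
+# immediately issues the init producer id request, expecting a
+# newly assigned producer id (1L) and epoch (2s).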
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 31 # size + 22s # init producer id + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + + +read 20 # size + (int:newRequestId) + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/server.rpt new file mode 100644 index 0000000000..4786dd1b8f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4/produce.new.id/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
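+#
+# Server-side mirror of produce.new.id/client.rpt: reads the
+# init producer id request and replies with producer id 1L and
+# producer epoch 2s.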
+# + +property networkAcceptWindow 8192 + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 31 # size + 22s # init producer id + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + -1s # transaction + 60000 # transaction timeout ms + -1L # producer id + -1s # producer epoch + +write 20 # size + ${newRequestId} + 0 # throttle time ms + 0s # no error + 1L # producer id + 2s # producer epoch diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/client.rpt index cd77d6febb..7641f18273 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 22 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/server.rpt index 0009e0db1d..3a1909abfd 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.plain/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 22 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/client.rpt index 0fe1dcd6c2..b4bac73f3b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 28 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/server.rpt index 9d5025efd2..95a9ef407b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7.sasl.handshake.v1/update.topic.partition.offset.sasl.scram/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 28 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/client.rpt index eb2e581f31..5f9041a834 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/client.rpt @@ -26,10 +26,23 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 89 # size - 8s # offset fetch + 8s # offset commit 7s # 7 ${newRequestId} 5s "zilla" # client id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/server.rpt index 0e5c67e529..91dc255e35 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offset/server.rpt @@ -23,10 +23,23 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 89 # size - 8s # offset fetch + 8s # offset commit 7s # 7 (int:newRequestId) 5s "zilla" # client id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/client.rpt index ad95d7b1c3..659b073293 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 89 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/server.rpt index 0e79e36a60..afe9986063 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.topic.partition.offsets/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 89 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/client.rpt index cff9b51fcc..3e9632090e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/client.rpt @@ -26,6 +26,19 @@ connect 
"zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 89 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/server.rpt index 5e8c0e2f7a..d32c451de4 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.commit.v7/update.unknown.topic.partition.offset/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 89 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/client.rpt index e02c484bc1..6a52a8ec2c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 22 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/server.rpt index 8c68a56aa2..9d91e5e128 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.plain/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + 
.addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 22 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/client.rpt index 54428d1f33..7fdcb94c4a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 28 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/server.rpt index 3d550d1059..26adce8ed3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5.sasl.handshake.v1/topic.offset.info.sasl.scram/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 28 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/client.rpt index 929eddfe18..898bdcce05 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + 
.info() + .authority("broker1.example.com") + .build() + .build()} connected write 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/server.rpt index 5ba7f11f68..2a10665f0b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info.incomplete/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/client.rpt index 469f649904..e482d021c1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/server.rpt index c13a00714a..907adb7108 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.info/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/client.rpt index 0f2f1c85b4..dcd9dc12e7 100644 
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/client.rpt @@ -26,6 +26,19 @@ connect "zilla://streams/net0" option zilla:transmission "duplex" option zilla:byteorder "network" +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected write 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/server.rpt index 645afd0049..5221638a35 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v5/topic.offset.no.partition/server.rpt @@ -23,6 +23,19 @@ accept "zilla://streams/net0" accepted +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} connected read 43 # size diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/client.rpt new file mode 100644 index 0000000000..188fb0e957 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/client.rpt @@ -0,0 +1,128 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
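+#
+# Client-side script: resolves the partition leader via a
+# metadata v5 request, reconnects to broker1.example.com
+# carrying a proxy beginEx extension with the broker authority,
+# and sends a produce v3 record batch stamped with producer id
+# 8L and epoch 1s.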
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property produceWaitMax 500 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 26 # size + 3s # metadata + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +read 97 # size + ${newRequestId} + [0..4] + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +write 125 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + 0x4e8723aa + 0s + 0 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 8L + 1s + 0 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/server.rpt new file mode 100644 index 0000000000..ff81148e96 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.producer.id/server.rpt @@ -0,0 +1,124 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
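+#
+# Server-side mirror of message.producer.id/client.rpt: answers
+# the metadata request with broker1.example.com, matches the
+# proxy beginEx extension on the second stream, and acknowledges
+# the produce v3 record batch.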
+# + +property networkAcceptWindow 8192 + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 26 # size + 3s # metadata + 5s # v5 + (int:requestId) + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +write 97 # size + ${requestId} + 0 + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +accepted + +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +read 125 + 0s + 3s + (int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + [0..4] + 0s + 0 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 8L + 1s + 0 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/client.rpt new file mode 100644 index 0000000000..f452e74075 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/client.rpt @@ -0,0 +1,182 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
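+#
+# Client-side script: after metadata discovery and the proxied
+# reconnect, sends two produce v3 batches whose producer id and
+# epoch change between requests (1L/1s, then 2L/2s), each batch
+# acknowledged separately.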
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property produceWaitMax 500 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 26 # size + 3s # metadata + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +read 97 # size + ${newRequestId} + [0..4] + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +write 140 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 95 # record set size + 0L # first offset + 83 # length + -1 + [0x02] + 0x4e8723aa + 0s + 0 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 1L + 1s + 0 + 1 # records + ${kafka:varint(33)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(1)} # headers + ${kafka:varint(7)} # key size + "header1" # key bytes + ${kafka:varint(6)} # value size + "value1" # value bytes + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms + +write 140 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 95 # record set size + 0L # first offset + 83 # length + -1 + [0x02] + 0x4e8723aa + 0s + 0 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 2L + 2s + 0 + 1 # records + ${kafka:varint(33)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, again" + ${kafka:varint(1)} # headers + ${kafka:varint(7)} # key size + "header1" # key bytes + ${kafka:varint(6)} # value size + "value1" # value bytes + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/server.rpt new file mode 100644 index 0000000000..f9741db993 --- /dev/null +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.changes/server.rpt @@ -0,0 +1,178 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkAcceptWindow 8192 + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 26 # size + 3s # metadata + 5s # v5 + (int:requestId) + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +write 97 # size + ${requestId} + 0 + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +accepted + +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +read 140 + 0s + 3s + (int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 95 # record set size + 0L # first offset + 83 # length + -1 + [0x02] + [0..4] + 0s + 0 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 1L + 1s + 0 + 1 # records + ${kafka:varint(33)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} + ${kafka:varint(12)} + "Hello, world" + ${kafka:varint(1)} # headers + ${kafka:varint(7)} # key size + "header1" # key bytes + ${kafka:varint(6)} # value size + "value1" # value bytes + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle + +read 140 + 0s + 3s + (int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 95 # record set size + 0L # first offset + 83 # length + -1 + [0x02] + [0..4] + 0s + 0 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 2L + 2s + 0 + 1 # records + ${kafka:varint(33)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} + ${kafka:varint(12)} + "Hello, again" + ${kafka:varint(1)} # headers + ${kafka:varint(7)} # key size + "header1" # key bytes + ${kafka:varint(6)} # value size + "value1" # value bytes + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/client.rpt new file mode 100644 index 0000000000..e182fd23d7 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/client.rpt @@ -0,0 +1,174 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property produceWaitMax 500 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 26 # size + 3s # metadata + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +read 97 # size + ${newRequestId} + [0..4] + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +write 125 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + 0x4e8723aa + 0s + 0 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 1L + 1s + 2 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms + +write 125 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + 
0x4e8723aa + 0s + 0 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 1L + 1s + 1 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, again" + ${kafka:varint(0)} # headers + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/server.rpt new file mode 100644 index 0000000000..bc36eb5f18 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id.replay/server.rpt @@ -0,0 +1,170 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkAcceptWindow 8192 + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 26 # size + 3s # metadata + 5s # v5 + (int:requestId) + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +write 97 # size + ${requestId} + 0 + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +accepted + +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +read 125 + 0s + 3s + (int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + [0..4] + 0s + 0 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 1L + 1s + 2 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle + +read 125 + 0s + 3s + 
(int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 80 # record set size + 0L # first offset + 68 # length + -1 + [0x02] + [0..4] + 0s + 0 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 1L + 1s + 1 + 1 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, again" + ${kafka:varint(0)} # headers + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/client.rpt new file mode 100644 index 0000000000..c0652f5969 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/client.rpt @@ -0,0 +1,136 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
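+#
+
+# The produce requests below reference ${newTimestamp}, which this script does
+# not define; the two properties below follow the matching server.rpt for this
+# scenario and are assumed to be the intended definitions.
+property deltaMillis 0L
+property newTimestamp ${kafka:timestamp() + deltaMillis}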
+# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property produceWaitMax 500 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 26 # size + 3s # metadata + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +read 97 # size + ${newRequestId} + [0..4] + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +write zilla:begin.ext ${proxy:beginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +write 144 # size + 0s # produce + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + -1s # transactional id + 0s # acks + ${produceWaitMax} + 1 + 4s "test" + 1 + 0 # partition + 99 # record set size + 0L # first offset + 87 # length + -1 + [0x02] + 0x4e8723aa + 0s + 1 # last offset delta + ${newTimestamp} # first timestamp + ${newTimestamp} # last timestamp + 1L + 1s + 0 + 2 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(1)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, again" + ${kafka:varint(0)} # headers + +read 44 + ${newRequestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition + 0s # no error + 20L # base offset + [0..8] # log append time + [0..4] # throttle ms diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/server.rpt new file mode 100644 index 0000000000..1a28c212c0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/produce.v3/message.values.producer.id/server.rpt @@ -0,0 +1,133 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
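+#
+
+# Server side of produce.v3 message.values.producer.id: expects one v3 produce
+# request whose record batch carries producer id 1, epoch 1, base sequence 0 and
+# two records ("Hello, world", "Hello, again"), then acks at base offset 20.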
+# + +property networkAcceptWindow 8192 + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 26 # size + 3s # metadata + 5s # v5 + (int:requestId) + 5s "zilla" # client id + 1 # topics + 4s "test" # "test" topic + [0x00] # allow_auto_topic_creation + +write 97 # size + ${requestId} + 0 + 1 # brokers + 0xb1 # broker id + 19s "broker1.example.com" # host name + 9092 # port + -1s # no rack + 9s "cluster 1" # cluster id + 1 # controller id + 1 # topics + 0s # no error + 4s "test" # "test" topic + [0x00] # not internal + 1 # partitions + 0s # no error + 0 # partition + 0xb1 # leader + 0 # no replicas + -1 # no in-sync replicas + 0 # offline replicas + +accepted + +read zilla:begin.ext ${proxy:matchBeginEx() + .typeId(zilla:id("proxy")) + .addressInet() + .protocol("stream") + .source("0.0.0.0") + .destination("broker1.example.com") + .sourcePort(0) + .destinationPort(9092) + .build() + .info() + .authority("broker1.example.com") + .build() + .build()} + +connected + +read 144 + 0s + 3s + (int:requestId) + 5s "zilla" # client id + -1s + [0..2] + [0..4] + 1 + 4s "test" + 1 + 0 + 99 # record set size + 0L # first offset + 87 # length + -1 + [0x02] + [0..4] + 0s + 1 # last offset delta + (long:timestamp) # first timestamp + ${timestamp} # last timestamp + 1L + 1s + 0 + 2 # records + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(0)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, world" + ${kafka:varint(0)} # headers + ${kafka:varint(18)} + [0x00] + ${kafka:varint(0)} + ${kafka:varint(1)} + ${kafka:varint(-1)} # key + ${kafka:varint(12)} # value + "Hello, again" + ${kafka:varint(0)} # headers + + +write 44 + ${requestId} + 1 # topics + 4s "test" + 1 # partitions + 0 # partition 0 + 0s # no error + 20L # base offset + 0L # log append time + 0 # throttle diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/config/SchemaTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/config/SchemaTest.java index 050d013329..d4fc6ca88e 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/config/SchemaTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/config/SchemaTest.java @@ -32,7 +32,7 @@ public class SchemaTest public final ConfigSchemaRule schema = new ConfigSchemaRule() .schemaPatch("io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") - .schemaPatch("io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json") .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config"); @Test @@ -122,4 +122,12 @@ public void shouldValidateCacheOptionsCatalog() assertThat(config, not(nullValue())); } + + @Test + public void shouldValidateCacheOptionsValidate() + { + JsonObject config = schema.validate("cache.client.options.validate.yaml"); + + assertThat(config, not(nullValue())); + } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index 
4e8c7318ae..f996b0c30b 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -75,6 +75,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaInitProducerIdBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFetchDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; @@ -580,6 +581,8 @@ public void shouldGenerateMergedProduceDataExtension() .produce() .deferred(0) .timestamp(12345678L) + .producerId(8L) + .producerEpoch((short) 2) .partition(0, 0L) .key("match") .hashKey("hashKey") @@ -1121,6 +1124,27 @@ public void shouldGenerateMergedFetchFlushExtensionWithStableOffset() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); } + @Test + public void shouldGenerateMergedProduceFlushExtension() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .merged() + .produce() + .hashKey("hashTopic") + .partitionId(0) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, flushEx.typeId()); + + assertEquals("hashTopic", flushEx.merged().produce().hashKey() + .value() + .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); + } + @Test public void shouldGenerateMergedConsumerFlushExtension() { @@ -1153,6 +1177,8 @@ public void shouldMatchProduceMergedDataExtension() throws Exception .partition(0, 0L) .progress(0, 1L) .timestamp(12345678L) + .producerId(1L) + .producerEpoch((short) 1) .key("match") .header("name", "value") .headerNull("name-n") @@ -1167,6 +1193,8 @@ public void shouldMatchProduceMergedDataExtension() throws Exception .merged(m -> m.produce(mp -> mp .deferred(100) .timestamp(12345678L) + .producerId(1L) + .producerEpoch((short) 1) .partition(p -> p.partitionId(0).partitionOffset(0L)) .key(k -> k.length(5) .value(v -> v.set("match".getBytes(UTF_8)))) @@ -1195,6 +1223,9 @@ public void shouldMatchMergedFetchDataExtensionWithLatestOffset() throws Excepti .partition(0, 0L, 1L) .progress(0, 1L, 1L) .timestamp(12345678L) + .producerId(8L) + .producerEpoch((short) 2) + .timestamp(12345678L) .key("match") .header("name", "value") .build() @@ -2752,7 +2783,6 @@ public void shouldMatchProduceBeginExtensionTransaction() throws Exception .typeId(0x01) .produce(f -> f .transaction("transaction") - .producerId(1L) .topic("test") .partition(p -> p.partitionId(0).partitionOffset(0L))) .build(); @@ -2776,7 +2806,6 @@ public void shouldMatchProduceBeginExtensionProducerId() throws Exception .typeId(0x01) .produce(f -> f .transaction("transaction") - .producerId(1L) .topic("test") .partition(p -> p.partitionId(0).partitionOffset(0L))) .build(); @@ -2800,7 +2829,6 @@ public void shouldMatchProduceBeginExtensionTopic() throws Exception .typeId(0x01) .produce(f -> f .transaction("transaction") - .producerId(1L) .topic("test") .partition(p -> p.partitionId(0).partitionOffset(0L))) .build(); @@ -2824,7 +2852,6 @@ 
public void shouldMatchProduceBeginExtensionPartition() throws Exception .typeId(0x01) .produce(f -> f .transaction("transaction") - .producerId(1L) .topic("test") .partition(p -> p.partitionId(0).partitionOffset(0L))) .build(); @@ -3551,7 +3578,6 @@ public void shouldGenerateProduceBeginExtension() .typeId(0x01) .produce() .transaction("transaction") - .producerId(1L) .topic("topic") .partition(1) .build() @@ -3564,7 +3590,6 @@ public void shouldGenerateProduceBeginExtension() final KafkaProduceBeginExFW produceBeginEx = beginEx.produce(); assertEquals("transaction", produceBeginEx.transaction().asString()); - assertEquals(1L, produceBeginEx.producerId()); assertEquals(1, produceBeginEx.partition().partitionId()); assertEquals("topic", produceBeginEx.topic().asString()); assertEquals(-1L, produceBeginEx.partition().partitionOffset()); @@ -3578,6 +3603,8 @@ public void shouldGenerateProduceDataExtension() .produce() .deferred(10) .timestamp(12345678L) + .producerId(1L) + .producerEpoch((short) 2) .sequence(0) .ackMode("IN_SYNC_REPLICAS") .key("match") @@ -3648,6 +3675,8 @@ public void shouldMatchProduceDataExtensionTimestamp() throws Exception BytesMatcher matcher = KafkaFunctions.matchDataEx() .produce() .timestamp(12345678L) + .producerId(8L) + .producerEpoch((short) 1) .build() .build(); @@ -4268,10 +4297,11 @@ public void shouldGenerateOffsetCommitBeginExtension() byte[] build = KafkaFunctions.beginEx() .typeId(0x01) .offsetCommit() - .topic("topic") .groupId("test") .memberId("member-1") .instanceId("zilla") + .host("broker1.example.com") + .port(9092) .build() .build(); @@ -4282,7 +4312,6 @@ public void shouldGenerateOffsetCommitBeginExtension() final KafkaOffsetCommitBeginExFW offsetCommitBeginEx = beginEx.offsetCommit(); assertEquals("test", offsetCommitBeginEx.groupId().asString()); - assertEquals("topic", offsetCommitBeginEx.topic().asString()); assertEquals("member-1", offsetCommitBeginEx.memberId().asString()); } @@ -4363,12 +4392,34 @@ public void shouldGenerateOffsetFetchDataExtension() assertEquals(1, offsetFetchDataEx.partitions().fieldCount()); } + @Test + public void shouldGenerateInitProducerIdBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .initProducerId() + .producerId(1L) + .producerEpoch((short) 2) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.INIT_PRODUCER_ID.value(), beginEx.kind()); + + KafkaInitProducerIdBeginExFW initProducerIdBeginEx = beginEx.initProducerId(); + assertEquals(1L, initProducerIdBeginEx.producerId()); + assertEquals(2, initProducerIdBeginEx.producerEpoch()); + } + @Test public void shouldGenerateOffsetCommitDataExtension() { byte[] build = KafkaFunctions.dataEx() .typeId(0x01) .offsetCommit() + .topic("test") .progress(0, 2L, "test-meta") .generationId(0) .leaderEpoch(0) diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/FetchIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/FetchIT.java index 33ea7cc819..2385275b2e 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/FetchIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/FetchIT.java @@ -181,33 +181,6 @@ public void shouldReceiveMessageValue() throws Exception 
k3po.finish(); } - @Test - @Specification({ - "${app}/message.value.string.invalid/client", - "${app}/message.value.string.invalid/server"}) - public void shouldReceiveMessageValueStringInvalid() throws Exception - { - k3po.finish(); - } - - @Test - @Specification({ - "${app}/message.value.valid/client", - "${app}/message.value.valid/server"}) - public void shouldReceiveMessageValueTest() throws Exception - { - k3po.finish(); - } - - @Test - @Specification({ - "${app}/message.value.invalid/client", - "${app}/message.value.invalid/server"}) - public void shouldReceiveMessageValueTestInvalid() throws Exception - { - k3po.finish(); - } - @Test @Specification({ "${app}/message.value.empty/client", diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/InitProducerIdIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/InitProducerIdIT.java new file mode 100644 index 0000000000..794acf430c --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/InitProducerIdIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
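+ *
+ * Drives the init.producer.id application streams: produce.new.id verifies that
+ * a client requesting a producer id is assigned a new producer id and epoch.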
+ */ +package io.aklivity.zilla.specs.binding.kafka.streams.application; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class InitProducerIdIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/init.producer.id"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${app}/produce.new.id/client", + "${app}/produce.new.id/server"}) + public void shouldGenerateNewProducerId() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index 44cae789c0..fccc447235 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -135,6 +135,33 @@ public void shouldFetchMergedMessagesWithNoFilterReadUncommitted() throws Except k3po.finish(); } + @Test + @Specification({ + "${app}/merged.fetch.message.value.convert/client", + "${app}/merged.fetch.message.value.convert/server"}) + public void shouldFetchMergedMessageValueConvert() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/merged.fetch.message.value.valid/client", + "${app}/merged.fetch.message.value.valid/server"}) + public void shouldFetchMergedMessageValueValid() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/merged.fetch.message.value.invalid/client", + "${app}/merged.fetch.message.value.invalid/server"}) + public void shouldFetchMergedMessageValueInvalid() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/merged.fetch.message.values/client", @@ -384,6 +411,24 @@ public void shouldFetchUnmergedFilterSync() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/unmerged.fetch.message.value.convert/client", + "${app}/unmerged.fetch.message.value.convert/server"}) + public void shouldFetchUnmergedMessageValueConvert() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.fetch.message.value.valid/client", + "${app}/unmerged.fetch.message.value.valid/server"}) + public void shouldFetchUnmergedMessageValueValid() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/unmerged.fetch.message.values/client", @@ -552,6 +597,15 @@ public void shouldProduceUnmergedMessageValueInvalid() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/unmerged.fetch.message.value.invalid/client", + "${app}/unmerged.fetch.message.value.invalid/server"}) + public void shouldFetchUnmergedMessageValueInvalid() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/unmerged.fetch.server.sent.close/client", @@ -746,4 +800,41 @@ public void shouldAckUnmergedFetchMessage() throws Exception { k3po.finish(); } + + @Test + @Specification({ + 
"${app}/merged.produce.message.values.producer.id/client", + "${app}/merged.produce.message.values.producer.id/server"}) + public void shouldProduceMergedMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.produce.message.values.producer.id/client", + "${app}/unmerged.produce.message.values.producer.id/server"}) + public void shouldProduceUnmergedMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/merged.produce.message.value.partition.id/client", + "${app}/merged.produce.message.value.partition.id/server"}) + public void shouldProduceMergedMessageValueByGettingPartitionId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.produce.message.value.partition.id/client", + "${app}/unmerged.produce.message.value.partition.id/server"}) + public void shouldProduceUnmergedMessageValueByGettingPartitionId() throws Exception + { + k3po.finish(); + } + } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ProduceIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ProduceIT.java index d15b907eb0..68c95e2df5 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ProduceIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ProduceIT.java @@ -145,6 +145,42 @@ public void shouldSendMessageValue() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/message.producer.id/client", + "${app}/message.producer.id/server"}) + public void shouldSendMessageValueWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/message.values.producer.id/client", + "${app}/message.values.producer.id/server"}) + public void shouldSendMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/message.values.producer.id.changes/client", + "${app}/message.values.producer.id.changes/server"}) + public void shouldSendMessageValuesWithProducerIdThatChanges() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/message.values.producer.id.replay/client", + "${app}/message.values.producer.id.replay/server"}) + public void shouldReplyMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/message.value.null/client", diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdIT.java new file mode 100644 index 0000000000..db9e626f68 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.specs.binding.kafka.streams.network; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class InitProducerIdIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${net}/produce.new.id/client", + "${net}/produce.new.id/server"}) + public void shouldGenerateNewProducerId() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdSaslIT.java new file mode 100644 index 0000000000..a3c795dfd5 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/InitProducerIdSaslIT.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
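+ *
+ * Covers init.producer.id over SASL: produce.new.id.sasl.plain and
+ * produce.new.id.sasl.scram verify a new producer id is assigned after the
+ * SASL PLAIN and SCRAM handshakes, respectively.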
+ */ +package io.aklivity.zilla.specs.binding.kafka.streams.network; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class InitProducerIdSaslIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", + "io/aklivity/zilla/specs/binding/kafka/streams/network/init.producer.id.v4.sasl.handshake.v1"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${net}/produce.new.id.sasl.plain/client", + "${net}/produce.new.id.sasl.plain/server"}) + public void shouldGenerateNewProducerIdWithSaslPlain() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/produce.new.id.sasl.scram/client", + "${net}/produce.new.id.sasl.scram/server"}) + public void shouldGenerateNewProducerIdWithSaslScram() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/ProduceIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/ProduceIT.java index 5ec40e1f68..f62ffb3baf 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/ProduceIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/ProduceIT.java @@ -118,6 +118,42 @@ public void shouldSendMessageValue() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/message.producer.id/client", + "${net}/message.producer.id/server"}) + public void shouldSendMessageValueWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/message.values.producer.id/client", + "${net}/message.values.producer.id/server"}) + public void shouldSendMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/message.values.producer.id.changes/client", + "${net}/message.values.producer.id.changes/server"}) + public void shouldSendMessageValuesWithProducerIdThatChanges() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/message.values.producer.id.replay/client", + "${net}/message.values.producer.id.replay/server"}) + public void shouldReplyMessageValuesWithProducerId() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/message.value.null/client", diff --git a/specs/binding-mqtt-kafka.spec/pom.xml b/specs/binding-mqtt-kafka.spec/pom.xml index 99ae42042e..f45653bbb6 100644 --- a/specs/binding-mqtt-kafka.spec/pom.xml +++ b/specs/binding-mqtt-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml @@ -88,7 +88,7 @@ flyweight-maven-plugin ${project.version} - core mqtt kafka + core mqtt kafka mqtt_kafka io.aklivity.zilla.specs.binding.mqtt.kafka.internal.types diff --git a/specs/binding-mqtt-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctions.java b/specs/binding-mqtt-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctions.java new file mode 100644 index 0000000000..4d6b9f34c0 --- 
/dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctions.java @@ -0,0 +1,123 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.aklivity.zilla.specs.binding.mqtt.kafka.internal; + +import org.agrona.BitUtil; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.kaazing.k3po.lang.el.Function; +import org.kaazing.k3po.lang.el.spi.FunctionMapperSpi; + +import io.aklivity.zilla.specs.binding.mqtt.kafka.internal.types.MqttPublishOffsetMetadataFW; +import io.aklivity.zilla.specs.binding.mqtt.kafka.internal.types.MqttSubscribeOffsetMetadataFW; + +public final class MqttKafkaFunctions +{ + @Function + public static MqttSubscribeOffsetMetadataBuilder subscribeMetadata() + { + return new MqttSubscribeOffsetMetadataBuilder(); + } + + @Function + public static MqttPublishOffsetMetadataBuilder publishMetadata() + { + return new MqttPublishOffsetMetadataBuilder(); + } + + public static final class MqttSubscribeOffsetMetadataBuilder + { + private final MqttSubscribeOffsetMetadataFW.Builder offsetMetadataRW = new MqttSubscribeOffsetMetadataFW.Builder(); + + byte version = 1; + + + private MqttSubscribeOffsetMetadataBuilder() + { + MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + offsetMetadataRW.wrap(writeBuffer, 0, writeBuffer.capacity()); + offsetMetadataRW.version(version); + } + + public MqttSubscribeOffsetMetadataBuilder metadata( + int packetId) + { + offsetMetadataRW.appendPacketIds((short) packetId); + return this; + } + + public String build() + { + final MqttSubscribeOffsetMetadataFW offsetMetadata = offsetMetadataRW.build(); + return BitUtil.toHex(offsetMetadata.buffer().byteArray(), offsetMetadata.offset(), offsetMetadata.limit()); + } + } + + public static final class MqttPublishOffsetMetadataBuilder + { + private final MqttPublishOffsetMetadataFW.Builder offsetMetadataRW = new MqttPublishOffsetMetadataFW.Builder(); + + byte version = 1; + + + private MqttPublishOffsetMetadataBuilder() + { + MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + offsetMetadataRW.wrap(writeBuffer, 0, writeBuffer.capacity()); + offsetMetadataRW.version(version); + } + + public MqttPublishOffsetMetadataBuilder packetId( + int packetId) + { + offsetMetadataRW.appendPacketIds((short) packetId); + return this; + } + + public MqttPublishOffsetMetadataBuilder producer( + long producerId, + short producerEpoch) + { + offsetMetadataRW.producerId(producerId).producerEpoch(producerEpoch); + return this; + } + + public String build() + { + final MqttPublishOffsetMetadataFW offsetMetadata = offsetMetadataRW.build(); + return BitUtil.toHex(offsetMetadata.buffer().byteArray(), offsetMetadata.offset(), offsetMetadata.limit()); + } + } + + public static class Mapper extends FunctionMapperSpi.Reflective + { + public Mapper() + { + super(MqttKafkaFunctions.class); + } + + @Override + public String getPrefixName() + 
{ + return "mqtt_kafka"; + } + } + + private MqttKafkaFunctions() + { + /* utility */ + } +} diff --git a/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi b/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi new file mode 100644 index 0000000000..cf4d32ef12 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi @@ -0,0 +1 @@ +io.aklivity.zilla.specs.binding.mqtt.kafka.internal.MqttKafkaFunctions$Mapper diff --git a/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/zilla/mqtt_kafka.idl b/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/zilla/mqtt_kafka.idl new file mode 100644 index 0000000000..74a792a173 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/resources/META-INF/zilla/mqtt_kafka.idl @@ -0,0 +1,33 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +scope mqtt_kafka +{ + struct MqttSubscribeOffsetMetadata + { + uint8 version = 1; + int8 length; + int16[length] packetIds = null; + } + + struct MqttPublishOffsetMetadata + { + uint8 version = 1; + int64 producerId = 0; + int16 producerEpoch = 0; + int8 length; + int16[length] packetIds = null; + } +} \ No newline at end of file diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/client.rpt index 85102c2f56..f828fe54bc 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/client.rpt @@ -17,6 +17,356 @@ connect "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext 
${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} +read notify RECEIVED_PRODUCER + +connected + +write close +read closed + + +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + 
.topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write notify SENT_INITIAL_OFFSET_COMMIT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await SENT_INITIAL_OFFSET_COMMIT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +read advised zilla:flush +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() @@ -46,7 +396,42 @@ write "message1" write flush -connect "zilla://streams/kafka0" +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -62,12 +447,31 @@ write zilla:begin.ext ${kafka:beginEx() connected +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + 
.hashKey("sensor/one") + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .produce() .deferred(0) - .partition(-1, -1) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) .key("sensor/one") .header("zilla:filter", "sensor") .header("zilla:filter", "one") @@ -79,7 +483,8 @@ write "message2" write flush -connect "zilla://streams/kafka0" +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/server.rpt index 170fd2677a..a9c56f3429 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.mixture.qos/server.rpt @@ -17,6 +17,331 @@ accept "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + 
.partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write advise 
zilla:flush + + accepted read zilla:begin.ext ${kafka:beginEx() @@ -48,6 +373,36 @@ read zilla:data.ext ${kafka:matchDataEx() read "message1" +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + accepted read zilla:begin.ext ${kafka:beginEx() @@ -62,12 +417,31 @@ read zilla:begin.ext ${kafka:beginEx() connected +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .merged() .produce() .deferred(0) - .partition(-1, -1) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) .key("sensor/one") .header("zilla:filter", "sensor") .header("zilla:filter", "one") diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/client.rpt new file mode 100644 index 0000000000..52e8157fab --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/client.rpt @@ -0,0 +1,225 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
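+#
+
+# Client side of publish.qos2.init.producer.abort: after the migrate signal,
+# session group, metadata and offset fetch steps complete, the initProducerId
+# stream is aborted by the server ("read aborted" at the end of this script).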
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify 
RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read aborted diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/server.rpt new file mode 100644 index 0000000000..6902e218ac --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.init.producer.abort/server.rpt @@ -0,0 +1,215 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +write abort diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/client.rpt new file mode 100644 index 0000000000..443f1e7ad8 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/client.rpt @@ -0,0 +1,106 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
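+#
+# Client-side flow for the QoS 2 "meta abort" scenario: after the migrate
+# signal and group coordination, expect the mqtt-messages metadata stream to
+# be aborted once it connects.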
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read aborted diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/server.rpt new file mode 100644 index 0000000000..2a941a9f15 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.meta.abort/server.rpt @@ -0,0 +1,101 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
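+#
+# Server-side counterpart of "meta abort": accept the migrate signal and group
+# streams, then abort the mqtt-messages metadata stream after it connects.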
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write abort diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/client.rpt new file mode 100644 index 0000000000..44fb9ec716 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/client.rpt @@ -0,0 +1,405 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
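+#
+# Client-side flow for "offset commit abort, phase 1": complete the session
+# handshake, producer init and the initial offset commits, then expect the
+# offset-commit stream opened for publishing to be aborted after the
+# PUBLISH_CONNECTED barrier, while the merged produce stream for mqtt-messages
+# still exchanges its hashKey flush advisories.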
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify 
RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} +read notify RECEIVED_PRODUCER + +connected + +write close +read closed + + +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write notify SENT_INITIAL_OFFSET_COMMIT + + +connect await SENT_INITIAL_OFFSET_COMMIT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +read advised zilla:flush +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read notify PUBLISH_CONNECTED +read aborted + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option 
zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-messages") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/server.rpt new file mode 100644 index 0000000000..048abff16f --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase1/server.rpt @@ -0,0 +1,380 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + 
.topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + 
.build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write advise zilla:flush + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write await PUBLISH_CONNECTED +write abort + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-messages") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/client.rpt new file mode 100644 index 0000000000..66c8e4e541 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/client.rpt @@ -0,0 +1,307 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
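+#
+# Client-side flow for "offset commit abort, phase 2": here the offset-commit
+# stream is aborted immediately after producer init, at the
+# RECEIVED_SESSION_CONNECTED barrier, before any publish traffic begins.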
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify 
RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} +read notify RECEIVED_PRODUCER + +connected + +write close +read closed + + +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read notify RECEIVED_SESSION_CONNECTED +read aborted + +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/server.rpt new file mode 100644 index 0000000000..88ec7fd4f2 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.commit.abort.phase2/server.rpt @@ -0,0 +1,290 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
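+#
+# Server-side counterpart of "offset commit abort, phase 2": accept the same
+# handshake streams, await the RECEIVED_SESSION_CONNECTED barrier and abort
+# the offset-commit stream, while still accepting the session expiry signals.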
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + 
.port(9092) + .build() + .build()} + +connected + +write await RECEIVED_SESSION_CONNECTED +write abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/client.rpt new file mode 100644 index 0000000000..1566b57c8f --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/client.rpt @@ -0,0 +1,159 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
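+#
+# Client-side flow for the QoS 2 "offset fetch abort" scenario: the offset
+# fetch for mqtt-messages partitions 0 and 1 is expected to be aborted by the
+# server instead of returning committed offsets.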
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read aborted diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/server.rpt new file mode 100644 index 0000000000..353a1ce36d --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.offset.fetch.abort/server.rpt @@ -0,0 +1,153 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write abort diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/client.rpt new file mode 100644 index 0000000000..c3a673319e --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/client.rpt @@ -0,0 +1,301 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 100, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(1) + .build()) + .partition(1, 70, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(2) + .build()) + .build() + .build()} +read zilla:data.empty + +write 
close +read closed + + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 10, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(3) + .build()) + .build() + .build()} +read zilla:data.empty +read notify RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 101, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +read advised zilla:flush diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/server.rpt new file mode 100644 index 0000000000..728b295bb3 --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.recovery/server.rpt @@ -0,0 +1,288 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 100, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(1) + .build()) + .partition(1, 70, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(2) + .build()) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 10, 0, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .packetId(3) + .build()) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + 
.build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 101, mqtt_kafka:publishMetadata() + .producer(12345, 3) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write advise zilla:flush diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/client.rpt new file mode 100644 index 0000000000..61b4e9c8be --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/client.rpt @@ -0,0 +1,533 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify 
RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} +read notify RECEIVED_PRODUCER + +connected + +write close +read closed + + +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write notify SENT_INITIAL_OFFSET_COMMIT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await SENT_INITIAL_OFFSET_COMMIT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +read advised zilla:flush +read 
notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-messages") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +write "message" +write flush + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-retained") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +write "message" +write flush + diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/server.rpt new file mode 100644 index 0000000000..c50e6f077d --- /dev/null +++ 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2.retained/server.rpt @@ -0,0 +1,499 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write advise zilla:flush + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read 
zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-messages") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +read "message" + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt-retained") + .partition(-1, -2) + .ackMode("IN_SYNC_REPLICAS") + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +read "message" diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/client.rpt index c6e3b10966..2d567ec776 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/client.rpt @@ -17,6 +17,419 @@ connect "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + 
.hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_MIGRATE_SIGNAL + + +connect await SENT_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty +write flush + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} + +write close + + +connect await RECEIVED_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +read notify RECEIVED_PARTITION_METADATA + +write close + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +read zilla:data.empty + +write close +read closed + + +connect await RECEIVED_PARTITION_METADATA + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +read zilla:data.empty +read notify RECEIVED_INITIAL_OFFSETS + +write close +read closed + + +connect await RECEIVED_INITIAL_OFFSETS + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} +read notify RECEIVED_PRODUCER + +connected + +write close +read closed + 
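+# commit initial publish offsets using the producer id and epoch
+# returned by initProducerId above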
+ +connect await RECEIVED_PRODUCER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write notify SENT_INITIAL_OFFSET_COMMIT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 3, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await SENT_INITIAL_OFFSET_COMMIT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +read advised zilla:flush +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, 
mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +write zilla:data.empty +write flush + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() @@ -29,12 +442,31 @@ write zilla:begin.ext ${kafka:beginEx() connected +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .produce() .deferred(0) - .partition(-1, -1) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) .key("sensor/one") .header("zilla:filter", "sensor") .header("zilla:filter", "one") @@ -45,3 +477,23 @@ write zilla:data.ext ${kafka:dataEx() write "message" write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 2) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +write "message2" +write flush + + + diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/server.rpt index 53377dd9f2..5fdddaf3c0 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.qos2/server.rpt @@ -17,6 +17,390 @@ accept "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#migrate") + .hashKey("client") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-session") + .protocol("highlander") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .timeout(1000) + .build() + .build()} + +connected + +write advise zilla:flush 
${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .generationId(1) + .leaderId("consumer-1") + .memberId("consumer-1") + .members("consumer-1") + .build() + .build()} +write flush + +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-messages") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .partition(1, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("mqtt-retained") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, -2) + .build() + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-messages") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .partition(1, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-session") + .host("broker1.example.com") + .port(9092) + .topic("mqtt-retained") + .partition(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .partition(0, 1, 0) + .build() + .build()} +write zilla:data.empty +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(0) + .producerEpoch(0) + .build() + .build()} + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .initProducerId() + .producerId(1) + .producerEpoch(1) + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(1, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-retained") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +#Triggered by PUBREL +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + 
+#Triggered by PUBREL +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 3, mqtt_kafka:publishMetadata() + .producer(1, 1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .key("client") + .build() + .filter() + .key("client#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# session expiry cancellation signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .partition(-1, -1) + .key("client#expiry-signal") + .hashKey("client") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write advise zilla:flush + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-session") + .memberId("consumer-1") + .instanceId("zilla") + .host("broker1.example.com") + .port(9092) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 1, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .topic("mqtt-messages") + .progress(0, 2, mqtt_kafka:publishMetadata() + .producer(1, 1) + .packetId(1) + .build()) + .generationId(1) + .leaderEpoch(0) + .build() + .build()} +read zilla:data.empty + + accepted read zilla:begin.ext ${kafka:beginEx() @@ -31,12 +415,31 @@ read zilla:begin.ext ${kafka:beginEx() connected +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .hashKey("sensor/one") + .partitionId(0) + .build() + .build()} + read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .merged() .produce() .deferred(0) - .partition(-1, -1) + .producerId(1) + .producerEpoch(1) + .partition(-1, 1) .key("sensor/one") .header("zilla:filter", "sensor") .header("zilla:filter", "one") @@ -45,3 +448,22 @@ read zilla:data.ext ${kafka:matchDataEx() .build() .build()} read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .produce() + .deferred(0) + .producerId(1) + .producerEpoch(1) + .partition(-1, 2) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:qos", "2") + .build() + .build()} +read "message2" + + diff --git 
a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt deleted file mode 100644 index e5d9d4cd82..0000000000 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc -# -# Licensed under the Aklivity Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# https://www.aklivity.io/aklivity-community-license/ -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# - -connect "zilla://streams/kafka0" - option zilla:window 8192 - option zilla:transmission "duplex" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("PRODUCE_ONLY") - .topic("mqtt-messages") - .partition(-1, -2) - .ackMode("NONE") - .build() - .build()} - -connected - -write notify MESSAGES_DONE - - -connect await MESSAGES_DONE - "zilla://streams/kafka0" - option zilla:window 8192 - option zilla:transmission "duplex" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("PRODUCE_ONLY") - .topic("mqtt-retained") - .partition(-1, -2) - .ackMode("NONE") - .build() - .build()} - -connected - -read advised zilla:flush diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt deleted file mode 100644 index dd3fd10a58..0000000000 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt +++ /dev/null @@ -1,50 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc -# -# Licensed under the Aklivity Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# https://www.aklivity.io/aklivity-community-license/ -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. 
-# - -accept "zilla://streams/kafka0" - option zilla:window 8192 - option zilla:transmission "duplex" - -accepted - -read zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("PRODUCE_ONLY") - .topic("mqtt-messages") - .partition(-1, -2) - .ackMode("NONE") - .build() - .build()} - -connected - - -accepted - -read zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("PRODUCE_ONLY") - .topic("mqtt-retained") - .partition(-1, -2) - .ackMode("NONE") - .build() - .build()} - -connected - -write advise zilla:flush - diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt deleted file mode 100644 index 612ba69b23..0000000000 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt +++ /dev/null @@ -1,32 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc -# -# Licensed under the Aklivity Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# https://www.aklivity.io/aklivity-community-license/ -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# - -connect "zilla://streams/kafka0" - option zilla:window 8192 - option zilla:transmission "duplex" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("PRODUCE_ONLY") - .topic("mqtt-messages") - .partition(-1, -2) - .ackMode("NONE") - .build() - .build()} - -connected - -read advised zilla:flush diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt deleted file mode 100644 index 0ec69c9a88..0000000000 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc -# -# Licensed under the Aklivity Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# https://www.aklivity.io/aklivity-community-license/ -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. 
-#
-
-accept "zilla://streams/kafka0"
-    option zilla:window 8192
-    option zilla:transmission "duplex"
-
-accepted
-
-read zilla:begin.ext ${kafka:beginEx()
-    .typeId(zilla:id("kafka"))
-    .merged()
-    .capabilities("PRODUCE_ONLY")
-    .topic("mqtt-messages")
-    .partition(-1, -2)
-    .ackMode("NONE")
-    .build()
-    .build()}
-
-connected
-
-write advise zilla:flush
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
index b2659a5bab..db7deb6ca0 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
@@ -225,7 +225,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
             .metadata(2)
             .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
index 45df409a42..3cdb43bffd 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
@@ -229,7 +229,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(2)
            .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/client.rpt
index bb39860052..a69d804085 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/client.rpt
@@ -137,7 +137,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/server.rpt
index 9ca39d5ab8..5fa79e6863 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.qos2/server.rpt
@@ -137,7 +137,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/client.rpt
index 4619492645..9a163c068a 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/client.rpt
@@ -189,7 +189,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 5,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(2)
            .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/server.rpt
index 781188782c..9170ccb85b 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.messages.mixture.qos/server.rpt
@@ -195,7 +195,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 5,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(2)
            .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt
index 91ab9353d8..6ea03c6f8e 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt
@@ -141,7 +141,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
@@ -249,7 +249,7 @@ read zilla:begin.ext ${kafka:matchBeginEx()
     .merged()
     .capabilities("FETCH_ONLY")
     .topic("mqtt-messages")
-    .partition(0, 2, 3, 3, mqtt:metadata()
+    .partition(0, 2, 3, 3, mqtt_kafka:subscribeMetadata()
         .metadata(1)
         .build())
     .build()
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt
index adf5fdc077..fef08595d9 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt
@@ -141,7 +141,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
@@ -239,7 +239,7 @@ write zilla:begin.ext ${kafka:beginEx()
     .merged()
     .capabilities("FETCH_ONLY")
     .topic("mqtt-messages")
-    .partition(0, 2, 3, 3, mqtt:metadata()
+    .partition(0, 2, 3, 3, mqtt_kafka:subscribeMetadata()
         .metadata(1)
         .build())
     .build()
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt
index 899fb9fceb..13394d3897 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt
@@ -267,7 +267,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(2)
            .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt
index 8b0140d779..fef2858068 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt
@@ -259,7 +259,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(2)
            .build())
     .correlationId(2)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/client.rpt
index e836f5fc39..4e6f38e92b 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/client.rpt
@@ -217,7 +217,7 @@ write advise zilla:flush ${kafka:flushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/server.rpt
index 01b6627d94..b96ddb2bf4 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.replay.retained.message.qos2/server.rpt
@@ -211,7 +211,7 @@ read advised zilla:flush ${kafka:matchFlushEx()
     .merged()
     .consumer()
     .progress(0, 3,
-        mqtt:metadata()
+        mqtt_kafka:subscribeMetadata()
            .metadata(1)
            .build())
     .correlationId(1)
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/client.rpt
index 9081f46d80..b69e3f718e 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/client.rpt
@@ -17,6 +17,54 @@ connect "zilla://streams/mqtt0"
     option zilla:window 8192
     option zilla:transmission "duplex"
 
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
+read notify RECEIVED_SESSION_STATE
+
+write await SENT_DATA_TWO
+# Triggered by PUBREL
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:matchFlushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+connect await RECEIVED_SESSION_STATE
+    "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
 write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .publish()
@@ -38,7 +86,8 @@ write "message1"
 write flush
 
-connect "zilla://streams/mqtt0"
+connect await RECEIVED_SESSION_STATE
+    "zilla://streams/mqtt0"
     option zilla:window 8192
     option zilla:transmission "duplex"
@@ -57,13 +106,16 @@ write zilla:data.ext ${mqtt:dataEx()
     .typeId(zilla:id("mqtt"))
     .publish()
     .qos("EXACTLY_ONCE")
+    .packetId(1)
     .build()
     .build()}
 write "message2"
 write flush
+write notify SENT_DATA_TWO
 
-connect "zilla://streams/mqtt0"
+connect await SENT_DATA_TWO
+    "zilla://streams/mqtt0"
     option zilla:window 8192
     option zilla:transmission "duplex"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/server.rpt
index 01739ace31..770df0be77 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.mixture.qos/server.rpt
@@ -19,6 +19,49 @@ accept "zilla://streams/mqtt0"
 
 accepted
 
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+write flush
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+accepted
+
 read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .publish()
@@ -56,6 +99,7 @@ read zilla:data.ext ${mqtt:matchDataEx()
     .typeId(zilla:id("mqtt"))
     .publish()
     .qos("EXACTLY_ONCE")
+    .packetId(1)
     .build()
     .build()}
 read "message2"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/client.rpt
similarity index 56%
rename from specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt
rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/client.rpt
index e62fd29994..4bb6c96bfa 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/client.rpt
@@ -14,18 +14,18 @@ #
 
 connect "zilla://streams/mqtt0"
-    option zilla:window 8192
-    option zilla:transmission "duplex"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
 
 write zilla:begin.ext ${mqtt:beginEx()
-    .typeId(zilla:id("mqtt"))
-    .publish()
-    .clientId("client")
-    .topic("sensor/one")
-    .flags("RETAIN")
-    .build()
-    .build()}
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
 
-connected
-
-read advised zilla:flush
+connect aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/server.rpt
similarity index 62%
rename from specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt
rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/server.rpt
index cd81e9e0f4..b4ed4e9e30 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.abort/server.rpt
@@ -13,18 +13,8 @@ # specific language governing permissions and limitations under the License.
 #
 
-connect "zilla://streams/mqtt0"
+accept "zilla://streams/mqtt0"
     option zilla:window 8192
     option zilla:transmission "duplex"
 
-write zilla:begin.ext ${mqtt:beginEx()
-    .typeId(zilla:id("mqtt"))
-    .publish()
-    .clientId("client")
-    .topic("sensor/one")
-    .build()
-    .build()}
-
-connected
-
-read advised zilla:flush
+rejected
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/client.rpt
new file mode 100644
index 0000000000..72055eab6e
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/client.rpt
@@ -0,0 +1,63 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
+read notify RECEIVED_SESSION_STATE
+
+
+connect await RECEIVED_SESSION_STATE
+    "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .clientId("client")
+    .topic("sensor/one")
+    .qos(2)
+    .build()
+    .build()}
+
+connected
+read notify PUBLISH_CONNECTED
+read aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/server.rpt
new file mode 100644
index 0000000000..46a062b407
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase1/server.rpt
@@ -0,0 +1,61 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .clientId("client")
+    .topic("sensor/one")
+    .qos(2)
+    .build()
+    .build()}
+
+connected
+
+write abort
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/client.rpt
new file mode 100644
index 0000000000..d41271d752
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/client.rpt
@@ -0,0 +1,44 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+read notify RECEIVED_SESSION_CONNECTED
+
+read aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/server.rpt
new file mode 100644
index 0000000000..30e1bbe368
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.offset.commit.abort.phase2/server.rpt
@@ -0,0 +1,45 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+write abort
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/client.rpt
new file mode 100644
index 0000000000..eb207d9b9a
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/client.rpt
@@ -0,0 +1,61 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .packetId(1)
+    .packetId(2)
+    .packetId(3)
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
+
+# Triggered by PUBREL
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/server.rpt
new file mode 100644
index 0000000000..43882d38cd
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.recovery/server.rpt
@@ -0,0 +1,64 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
+    .clientId("client")
+    .packetId(1)
+    .packetId(2)
+    .packetId(3)
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+write flush
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/client.rpt
new file mode 100644
index 0000000000..82f9e3ae6b
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/client.rpt
@@ -0,0 +1,93 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
+read notify RECEIVED_SESSION_STATE
+
+write await SENT_DATA_ONE
+# Triggered by PUBREL
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+connect await RECEIVED_SESSION_STATE
+    "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .clientId("client")
+    .topic("sensor/one")
+    .flags("RETAIN")
+    .qos(2)
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.ext ${mqtt:dataEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .qos("EXACTLY_ONCE")
+    .flags("RETAIN")
+    .packetId(1)
+    .build()
+    .build()}
+write "message"
+write flush
+write notify SENT_DATA_ONE
+
+
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/server.rpt
new file mode 100644
index 0000000000..aeb5a33408
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2.retained/server.rpt
@@ -0,0 +1,87 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+#   https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+write flush
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .clientId("client")
+    .topic("sensor/one")
+    .flags("RETAIN")
+    .qos(2)
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.ext ${mqtt:matchDataEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .qos("EXACTLY_ONCE")
+    .flags("RETAIN")
+    .packetId(1)
+    .build()
+    .build()}
+read "message"
+
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/client.rpt
index bb98405e3a..e73e50b548 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/client.rpt
@@ -13,11 +13,76 @@ # specific language governing permissions and limitations under the License.
 #
-
 connect "zilla://streams/mqtt0"
     option zilla:window 8192
     option zilla:transmission "duplex"
 
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
+read notify RECEIVED_SESSION_STATE
+
+# Triggered by PUBREL
+write await SENT_DATA_ONE
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:matchFlushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+read notify RECEIVED_FLUSH
+
+write await SENT_DATA_TWO
+# Triggered by PUBREL
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:matchFlushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+
+connect await RECEIVED_SESSION_STATE
+    "zilla://streams/mqtt0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
 write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .publish()
@@ -33,7 +98,21 @@ write zilla:data.ext ${mqtt:dataEx()
     .typeId(zilla:id("mqtt"))
     .publish()
     .qos("EXACTLY_ONCE")
+    .packetId(1)
     .build()
     .build()}
 write "message"
 write flush
+write notify SENT_DATA_ONE
+
+write await RECEIVED_FLUSH
+write zilla:data.ext ${mqtt:dataEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .qos("EXACTLY_ONCE")
+    .packetId(1)
+    .build()
+    .build()}
+write "message2"
+write flush
+write notify SENT_DATA_TWO
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/server.rpt
index fbb4196b8c..c161fc4087 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.qos2/server.rpt
@@ -19,6 +19,63 @@ accept "zilla://streams/mqtt0"
 
 accepted
 
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .flags("CLEAN_START")
+    .expiry(1)
+    .publishQosMax(2)
+    .capabilities("REDIRECT")
+    .clientId("client")
+    .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .expiry(1)
+    .subscribeQosMax(2)
+    .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
+    .clientId("client")
+    .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+write flush
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+read advised zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+write advise zilla:flush ${mqtt:flushEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+    .packetId(1)
+    .build()
+    .build()}
+
+
+accepted
+
 read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .publish()
@@ -34,6 +91,17 @@ read zilla:data.ext ${mqtt:matchDataEx()
     .typeId(zilla:id("mqtt"))
     .publish()
     .qos("EXACTLY_ONCE")
+    .packetId(1)
     .build()
     .build()}
 read "message"
+
+read zilla:data.ext ${mqtt:matchDataEx()
+    .typeId(zilla:id("mqtt"))
+    .publish()
+    .qos("EXACTLY_ONCE")
+    .packetId(1)
+    .build()
+    .build()}
+read "message2"
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt
index 8df5293153..f05933944a 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt
@@ -22,6 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt
index be7bfea47a..bc15aa62a0 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt
index 1d83b99862..0c273b45af 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -78,6 +79,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt
index 3aa61ab5c0..5868516706 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt
@@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -74,6 +75,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt
index 628c0114fe..b5ff14631c 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt
index 7afd1331b1..60bf65d849 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt
index 047113b2db..aef2a46ae0 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -76,6 +77,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt
index 756f369c1c..48b36ed12a 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -74,6 +75,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt
index 0a23695f58..a9571e6855 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt
@@ -22,6 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt
index 58940fdcd8..f0917f5bd9 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt
index 9b17e7cb7a..dd0f86e790 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(100)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt
index 2232e77d8d..50d344d195 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(100)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt
index 1b98a74e50..13a767a911 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(0)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt
index 4916dcb1ed..81e8b61765 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(0)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt
index 6fe5bec3e8..5c65469d5e 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt
@@ -22,6 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -83,6 +84,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
     .flags("CLEAN_START")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt
index 6475a7af1f..f5e0bd707e 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -70,7 +71,9 @@ accepted
 read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
+    .flags("CLEAN_START")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt
index 84947bb6d0..15731ce18a 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt
index 798d86209c..3ea2769d0e 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt
index 0fa7aebe20..054bc276a2 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt
index 3b1335a150..286b9bc990 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt
index 6383f9fd5d..30487a97ae 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt
index 866d39a084..b9bca19e6f 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt
index c38a0850d0..20f1d0b294 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt
index 7b9e265b8b..25b0609f4a 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt
@@ -22,6 +22,8 @@ accepted
 read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
+    .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt
index d45c693511..8e2f09eb74 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt
index 3a13bf2953..4a91ab16f3 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt
index 64a0c9dfe3..78e8372f07 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt
index 5a6717a9be..c0715fb53f 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt
index b6296130be..a1d1b43209 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -29,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
-    .qosMax(2)
+    .subscribeQosMax(2)
     .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
     .clientId("client-1")
     .build()
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt
index 233cd07f0f..fd0f073269 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt
@@ -23,6 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
@@ -31,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
-    .qosMax(2)
+    .subscribeQosMax(2)
     .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD")
     .clientId("client-1")
     .build()
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt
index a3e5b43a95..14b65eaeb0 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt
index a5a01e3656..72b8c1deb2 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt
@@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt
index 12f50776ef..228dfd53b6 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt
@@ -21,6 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt
index cf6802be63..612db4b976 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt
@@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/client.rpt
index 2bb918808d..ed9cda3ee1 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/client.rpt
@@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
     .flags("WILL")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/server.rpt
index 591401e456..9cc8827b92 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.10k.abort.deliver.will/server.rpt
@@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .session()
     .flags("WILL")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt
index c4c18199eb..d0c642cbc8 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt
@@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
     .flags("WILL")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt
index 984cf558ef..5defffc436 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt
@@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .session()
     .flags("WILL")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt
index add6f3a5d4..a02b6af6ed 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt
@@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
     .flags("WILL")
     .expiry(1)
+    .capabilities("REDIRECT")
     .clientId("client-1")
     .build()
     .build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt
index e906b8a484..11587fd0bc 100644
---
a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt index 5a026075d4..fd0524ca1f 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL", "CLEAN_START") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt index ad239e3177..fc145f9528 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL", "CLEAN_START") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt index 4fa7018a28..f488b67185 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt index c950d5183b..c8ffa22f88 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt +++ 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt index 84cea0e4ca..90f8787e1d 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("one") .build() .build()} @@ -58,6 +59,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("one") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt index 84ff6c7013..a5e86de86d 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("one") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt index 317377462d..42760232f6 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt index a5d42b4fbc..aee189c3cb 100644 --- 
a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt index ac411f9a7f..e419fc0a89 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt index 8ddbe51d94..c56bb900b2 100644 --- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt +++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("WILL") .expiry(1) + .capabilities("REDIRECT") .clientId("client-1") .build() .build()} diff --git a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctionsTest.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctionsTest.java new file mode 100644 index 0000000000..d8a121a61f --- /dev/null +++ b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/internal/MqttKafkaFunctionsTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.aklivity.zilla.specs.binding.mqtt.kafka.internal; + +import static org.junit.Assert.assertEquals; + +import java.util.function.IntConsumer; + +import org.agrona.BitUtil; +import org.agrona.DirectBuffer; +import org.agrona.collections.IntArrayList; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.Test; + +import io.aklivity.zilla.specs.binding.mqtt.kafka.internal.types.MqttPublishOffsetMetadataFW; +import io.aklivity.zilla.specs.binding.mqtt.kafka.internal.types.MqttSubscribeOffsetMetadataFW; + +public class MqttKafkaFunctionsTest +{ + @Test + public void shouldGetMapper() + { + MqttKafkaFunctions.Mapper mapper = new MqttKafkaFunctions.Mapper(); + assertEquals("mqtt_kafka", mapper.getPrefixName()); + } + @Test + public void shouldEncodeMqttOffsetMetadata() + { + final String state = MqttKafkaFunctions.subscribeMetadata() + .metadata(1) + .metadata(2) + .build(); + + final IntArrayList metadataList = new IntArrayList(); + UnsafeBuffer buffer = new UnsafeBuffer(BitUtil.fromHex(state)); + MqttSubscribeOffsetMetadataFW offsetMetadata = new MqttSubscribeOffsetMetadataFW().wrap(buffer, 0, buffer.capacity()); + offsetMetadata.packetIds().forEachRemaining((IntConsumer) metadataList::add); + + assertEquals(1, offsetMetadata.version()); + assertEquals(1, (int) metadataList.get(0)); + assertEquals(2, (int) metadataList.get(1)); + } + + @Test + public void shouldEncodeMqttPublishOffsetMetadata() + { + final String state = MqttKafkaFunctions.publishMetadata() + .producer(1L, (short) 1) + .packetId(1) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(BitUtil.fromHex(state)); + MqttPublishOffsetMetadataFW offsetMetadata = new MqttPublishOffsetMetadataFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(1, offsetMetadata.version()); + assertEquals(1, offsetMetadata.packetIds().nextInt()); + assertEquals(1, offsetMetadata.producerId()); + assertEquals(1, offsetMetadata.producerEpoch()); + } +} diff --git a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index 97a896ad9f..ec7de4d86e 100644 --- a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -62,15 +62,6 @@ public void shouldPublishReceiveServerSentAbort() throws Exception k3po.finish(); } - @Test - @Specification({ - "${kafka}/publish.server.sent.flush/client", - "${kafka}/publish.server.sent.flush/server"}) - public void shouldPublishReceiveServerSentFlush() throws Exception - { - k3po.finish(); - } - @Test @Specification({ "${kafka}/publish.server.sent.reset/client", @@ -98,15 +89,6 @@ public void shouldPublishReceiveServerSentRetainedAbort() throws Exception k3po.finish(); } - @Test - @Specification({ - "${kafka}/publish.retained.server.sent.flush/client", - "${kafka}/publish.retained.server.sent.flush/server"}) - public void shouldPublishReceiveServerSentRetainedFlush() throws Exception - { - k3po.finish(); - } - @Test @Specification({ "${kafka}/publish.retained.server.sent.reset/client", @@ -863,6 +845,75 @@ public void shouldPublishQoS2Message() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/publish.qos2.retained/client", + "${kafka}/publish.qos2.retained/server"}) + public void shouldPublishQoS2MessageRetained() throws Exception + { + 
k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.recovery/client", + "${kafka}/publish.qos2.recovery/server"}) + public void shouldPublishQoS2MessageDuringRecovery() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.meta.abort/client", + "${kafka}/publish.qos2.meta.abort/server"}) + public void shouldSessionReceiveQos2MetaSentAbort() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.offset.fetch.abort/client", + "${kafka}/publish.qos2.offset.fetch.abort/server"}) + public void shouldSessionReceiveQos2OffsetFetchSentAbort() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.init.producer.abort/client", + "${kafka}/publish.qos2.init.producer.abort/server"}) + public void shouldSessionReceiveQos2InitProducerSentAbort() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.offset.commit.abort.phase1/client", + "${kafka}/publish.qos2.offset.commit.abort.phase1/server"}) + public void shouldPublishReceiveQos2OffsetCommitSentAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.qos2.offset.commit.abort.phase2/client", + "${kafka}/publish.qos2.offset.commit.abort.phase2/server"}) + public void shouldSessionReceiveQos2OffsetCommitSentAbort() throws Exception + { + k3po.start(); + k3po.finish(); + } + @Test @Specification({ "${kafka}/publish.mixture.qos/client", diff --git a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 132d0b034e..973a4d0661 100644 --- a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -62,15 +62,6 @@ public void shouldPublishReceiveServerSentAbort() throws Exception k3po.finish(); } - @Test - @Specification({ - "${mqtt}/publish.server.sent.flush/client", - "${mqtt}/publish.server.sent.flush/server"}) - public void shouldPublishReceiveServerSentFlush() throws Exception - { - k3po.finish(); - } - @Test @Specification({ "${mqtt}/publish.server.sent.reset/client", @@ -233,15 +224,6 @@ public void shouldSubscribeReceiveServerSentAbort() throws Exception k3po.finish(); } - @Test - @Specification({ - "${mqtt}/subscribe.server.sent.flush/client", - "${mqtt}/subscribe.server.sent.flush/server"}) - public void shouldSubscribeReceiveServerSentFlush() throws Exception - { - k3po.finish(); - } - @Test @Specification({ "${mqtt}/subscribe.server.sent.reset/client", @@ -687,6 +669,55 @@ public void shouldPublishQoS2Message() throws Exception k3po.finish(); } + @Test + @Specification({ + "${mqtt}/publish.qos2.retained/client", + "${mqtt}/publish.qos2.retained/server"}) + public void shouldPublishQoS2MessageRetained() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/publish.qos2.recovery/client", + "${mqtt}/publish.qos2.recovery/server"}) + public void shouldPublishQoS2MessageDuringRecovery() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/publish.qos2.abort/client", + "${mqtt}/publish.qos2.abort/server"}) + public void 
shouldSessionReceiveQos2Abort() throws Exception + { + k3po.start(); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/publish.qos2.offset.commit.abort.phase1/client", + "${mqtt}/publish.qos2.offset.commit.abort.phase1/server"}) + public void shouldPublishReceiveQos2OffsetCommitSentAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/publish.qos2.offset.commit.abort.phase2/client", + "${mqtt}/publish.qos2.offset.commit.abort.phase2/server"}) + public void shouldSessionReceiveQos2OffsetCommitAbort() throws Exception + { + k3po.start(); + k3po.finish(); + } + @Test @Specification({ "${mqtt}/publish.mixture.qos/client", diff --git a/specs/binding-mqtt.spec/pom.xml b/specs/binding-mqtt.spec/pom.xml index 93bc0848c9..a3bfaa7f3b 100644 --- a/specs/binding-mqtt.spec/pom.xml +++ b/specs/binding-mqtt.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index d3913cc7c3..42d726c4e7 100644 --- a/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -19,11 +19,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.PrimitiveIterator; import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; -import org.agrona.BitUtil; import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; @@ -53,7 +55,6 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttExtensionKind; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttOffsetMetadataFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttOffsetStateFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishDataExFW; @@ -62,6 +63,7 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataKind; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionFlushExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeFlushExFW; @@ -111,21 +113,21 @@ public static MqttFlushExBuilder flushEx() } @Function - public static MqttResetExBuilder resetEx() + public static MqttFlushExMatcherBuilder matchFlushEx() { - return new MqttResetExBuilder(); + return new MqttFlushExMatcherBuilder(); } @Function - public static MqttSessionStateBuilder session() + public static MqttResetExBuilder resetEx() { - return new MqttSessionStateBuilder(); + return new MqttResetExBuilder(); } 
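// The offset-metadata builder removed from MqttFunctions below appears to be
// superseded by MqttKafkaFunctions in binding-mqtt-kafka.spec, split into
// subscribe and publish variants. A minimal usage sketch, mirroring
// MqttKafkaFunctionsTest above (assumes binding-mqtt-kafka.spec is on the
// classpath; the values are illustrative):

    // hex-encoded MqttSubscribeOffsetMetadataFW carrying packet ids 1 and 2
    String subscribeState = MqttKafkaFunctions.subscribeMetadata()
        .metadata(1)
        .metadata(2)
        .build();

    // hex-encoded MqttPublishOffsetMetadataFW with producer id, epoch and packet id
    String publishState = MqttKafkaFunctions.publishMetadata()
        .producer(1L, (short) 1)
        .packetId(1)
        .build();

// In .rpt scripts the same builders resolve under the "mqtt_kafka" prefix
// registered by MqttKafkaFunctions.Mapper, per shouldGetMapper above.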
@Function - public static MqttOffsetMetadataBuilder metadata() + public static MqttSessionStateBuilder session() { - return new MqttOffsetMetadataBuilder(); + return new MqttSessionStateBuilder(); } @Function @@ -224,6 +226,13 @@ public MqttSessionBeginExBuilder clientId( return this; } + public MqttSessionBeginExBuilder packetId( + int packetId) + { + sessionBeginExRW.appendPacketIds((short) packetId); + return this; + } + public MqttSessionBeginExBuilder expiry( int expiry) { @@ -231,10 +240,17 @@ public MqttSessionBeginExBuilder expiry( return this; } - public MqttSessionBeginExBuilder qosMax( - int qosMax) + public MqttSessionBeginExBuilder subscribeQosMax( + int subscribeQosMax) + { + sessionBeginExRW.subscribeQosMax(subscribeQosMax); + return this; + } + + public MqttSessionBeginExBuilder publishQosMax( + int publishQosMax) { - sessionBeginExRW.qosMax(qosMax); + sessionBeginExRW.publishQosMax(publishQosMax); return this; } @@ -570,6 +586,13 @@ public MqttPublishDataExBuilder flags( return this; } + public MqttPublishDataExBuilder packetId( + int packetId) + { + publishDataExRW.packetId(packetId); + return this; + } + public MqttPublishDataExBuilder expiryInterval( int expiryInterval) { @@ -680,6 +703,13 @@ public MqttFlushExBuilder typeId( return this; } + public MqttSessionFlushExBuilder session() + { + flushExRW.kind(MqttExtensionKind.SESSION.value()); + + return new MqttSessionFlushExBuilder(); + } + public MqttSubscribeFlushExBuilder subscribe() { flushExRW.kind(MqttExtensionKind.SUBSCRIBE.value()); @@ -687,13 +717,37 @@ public MqttSubscribeFlushExBuilder subscribe() return new MqttSubscribeFlushExBuilder(); } + public final class MqttSessionFlushExBuilder + { + private final MqttSessionFlushExFW.Builder sessionFlushExRW = new MqttSessionFlushExFW.Builder(); + + private MqttSessionFlushExBuilder() + { + sessionFlushExRW.wrap(writeBuffer, MqttFlushExFW.FIELD_OFFSET_SESSION, writeBuffer.capacity()); + } + + public MqttSessionFlushExBuilder packetId( + int packetId) + { + sessionFlushExRW.packetId(packetId); + return this; + } + + public MqttFlushExBuilder build() + { + final MqttSessionFlushExFW sessionFlushEx = sessionFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, sessionFlushEx.limit()); + return MqttFlushExBuilder.this; + } + } + public final class MqttSubscribeFlushExBuilder { private final MqttSubscribeFlushExFW.Builder subscribeFlushExRW = new MqttSubscribeFlushExFW.Builder(); private MqttSubscribeFlushExBuilder() { - subscribeFlushExRW.wrap(writeBuffer, MqttBeginExFW.FIELD_OFFSET_PUBLISH, writeBuffer.capacity()); + subscribeFlushExRW.wrap(writeBuffer, MqttFlushExFW.FIELD_OFFSET_SUBSCRIBE, writeBuffer.capacity()); } public MqttSubscribeFlushExBuilder packetId( @@ -860,34 +914,6 @@ public byte[] build() } } - public static final class MqttOffsetMetadataBuilder - { - private final MqttOffsetMetadataFW.Builder offsetMetadataRW = new MqttOffsetMetadataFW.Builder(); - - byte version = 1; - - - private MqttOffsetMetadataBuilder() - { - MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - offsetMetadataRW.wrap(writeBuffer, 0, writeBuffer.capacity()); - offsetMetadataRW.version(version); - } - - public MqttOffsetMetadataBuilder metadata( - int packetId) - { - offsetMetadataRW.appendPacketIds((short) packetId); - return this; - } - - public String build() - { - final MqttOffsetMetadataFW offsetMetadata = offsetMetadataRW.build(); - return BitUtil.toHex(offsetMetadata.buffer().byteArray(), offsetMetadata.offset(), offsetMetadata.limit()); - } - } - public 
static final class MqttWillMessageBuilder { private final MqttWillMessageFW.Builder willMessageRW = new MqttWillMessageFW.Builder(); @@ -1434,14 +1460,17 @@ private boolean matchFilters( public final class MqttSessionBeginExMatcherBuilder { private String16FW clientId; + private List packetIds; private Integer expiry; private Integer flags; private Integer capabilities; - private Integer qosMax; + private Integer subscribeQosMax; + private int publishQosMax; private Integer packetSizeMax; private MqttSessionBeginExMatcherBuilder() { + packetIds = new ArrayList<>(); } public MqttSessionBeginExMatcherBuilder clientId( @@ -1458,10 +1487,24 @@ public MqttSessionBeginExMatcherBuilder expiry( return this; } - public MqttSessionBeginExMatcherBuilder qosMax( - int qosMax) + public MqttSessionBeginExMatcherBuilder packetId( + int packetId) { - this.qosMax = qosMax; + this.packetIds.add(packetId); + return this; + } + + public MqttSessionBeginExMatcherBuilder subscribeQosMax( + int subscribeQosMax) + { + this.subscribeQosMax = subscribeQosMax; + return this; + } + + public MqttSessionBeginExMatcherBuilder publishQosMax( + int publishQosMax) + { + this.publishQosMax = publishQosMax; return this; } @@ -1501,8 +1544,9 @@ private boolean match( final MqttSessionBeginExFW sessionBeginEx = beginEx.session(); return matchFlags(sessionBeginEx) && matchClientId(sessionBeginEx) && + matchPacketIds(sessionBeginEx) && matchExpiry(sessionBeginEx) && - matchQosMax(sessionBeginEx) && + matchSubscribeQosMax(sessionBeginEx) && matchPacketSizeMax(sessionBeginEx) && matchCapabilities(sessionBeginEx); } @@ -1513,10 +1557,23 @@ private boolean matchClientId( return clientId == null || clientId.equals(sessionBeginEx.clientId()); } - private boolean matchQosMax( + private boolean matchPacketIds( + final MqttSessionBeginExFW sessionBeginEx) + { + final PrimitiveIterator.OfInt ids = sessionBeginEx.packetIds(); + + boolean match = packetIds == null || packetIds.isEmpty(); + while (!match && ids.hasNext()) + { + match = packetIds.contains(ids.nextInt()); + } + return match; + } + + private boolean matchSubscribeQosMax( final MqttSessionBeginExFW sessionBeginEx) { - return qosMax == null || qosMax == sessionBeginEx.qosMax(); + return subscribeQosMax == null || subscribeQosMax == sessionBeginEx.subscribeQosMax(); } private boolean matchPacketSizeMax( @@ -1853,6 +1910,7 @@ public final class MqttPublishDataExMatcherBuilder private final DirectBuffer correlationRO = new UnsafeBuffer(0, 0); private Integer qos; private Integer flags; + private Integer packetId; private Integer expiryInterval = -1; private String16FW contentType; private MqttPayloadFormatFW format; @@ -1887,6 +1945,13 @@ public MqttPublishDataExMatcherBuilder flags( return this; } + public MqttPublishDataExMatcherBuilder packetId( + int packetId) + { + this.packetId = packetId; + return this; + } + public MqttPublishDataExMatcherBuilder expiryInterval( int expiryInterval) { @@ -1967,6 +2032,7 @@ private boolean match( return matchDeferred(publishDataEx) && matchQos(publishDataEx) && matchFlags(publishDataEx) && + matchPacketId(publishDataEx) && matchExpiryInterval(publishDataEx) && matchContentType(publishDataEx) && matchFormat(publishDataEx) && @@ -1993,6 +2059,12 @@ private boolean matchFlags( return flags == null || flags == data.flags(); } + private boolean matchPacketId( + final MqttPublishDataExFW data) + { + return packetId == null || packetId == data.packetId(); + } + private boolean matchExpiryInterval( final MqttPublishDataExFW data) { @@ -2031,6 
+2103,113 @@ private boolean matchUserProperties( } } + public static final class MqttFlushExMatcherBuilder + { + private final DirectBuffer bufferRO = new UnsafeBuffer(); + + private final MqttFlushExFW flushExRo = new MqttFlushExFW(); + + private Integer typeId; + private Integer kind; + private Predicate caseMatcher; + + public MqttSessionFlushExMatcherBuilder session() + { + final MqttSessionFlushExMatcherBuilder matcherBuilder = new MqttSessionFlushExMatcherBuilder(); + + this.kind = MqttExtensionKind.SESSION.value(); + this.caseMatcher = matcherBuilder::match; + return matcherBuilder; + } + + public MqttFlushExMatcherBuilder typeId( + int typeId) + { + this.typeId = typeId; + return this; + } + + public BytesMatcher build() + { + return typeId != null || kind != null ? this::match : buf -> null; + } + + private MqttFlushExFW match( + ByteBuffer byteBuf) throws Exception + { + if (!byteBuf.hasRemaining()) + { + return null; + } + + bufferRO.wrap(byteBuf); + final MqttFlushExFW flushEx = flushExRo.tryWrap(bufferRO, byteBuf.position(), byteBuf.capacity()); + + if (flushEx != null && + matchTypeId(flushEx) && + matchKind(flushEx) && + matchCase(flushEx)) + { + byteBuf.position(byteBuf.position() + flushEx.sizeof()); + return flushEx; + } + + throw new Exception(flushEx.toString()); + } + + private boolean matchTypeId( + final MqttFlushExFW flushEx) + { + return typeId == null || typeId == flushEx.typeId(); + } + + private boolean matchKind( + final MqttFlushExFW flushEx) + { + return kind == null || kind == flushEx.kind(); + } + + private boolean matchCase( + final MqttFlushExFW flushEx) throws Exception + { + return caseMatcher == null || caseMatcher.test(flushEx); + } + + public final class MqttSessionFlushExMatcherBuilder + { + private Integer packetId; + + private MqttSessionFlushExMatcherBuilder() + { + } + + public MqttSessionFlushExMatcherBuilder packetId( + int packetId) + { + this.packetId = packetId; + return this; + } + + public MqttFlushExMatcherBuilder build() + { + return MqttFlushExMatcherBuilder.this; + } + + private boolean match( + MqttFlushExFW flushEx) + { + final MqttSessionFlushExFW sessionFlushEx = flushEx.session(); + return matchPacketId(sessionFlushEx); + } + + private boolean matchPacketId( + final MqttSessionFlushExFW flush) + { + return packetId == null || packetId == flush.packetId(); + } + } + } + public static class Mapper extends FunctionMapperSpi.Reflective { public Mapper() diff --git a/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 170f771a63..26730b7747 100644 --- a/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -148,17 +148,21 @@ scope mqtt RETAIN (0), WILDCARD (1), SUBSCRIPTION_IDS (2), - SHARED_SUBSCRIPTIONS (3) + SHARED_SUBSCRIPTIONS (3), + REDIRECT(4) } struct MqttSessionBeginEx { uint8 flags = 0; int32 expiry = 0; - uint16 qosMax = 0; + uint16 subscribeQosMax = 0; + uint16 publishQosMax = 0; uint32 packetSizeMax = 0; uint8 capabilities = 0; string16 clientId; + int8 length; + int16[length] packetIds = null; } struct MqttSubscribeBeginEx @@ -204,6 +208,7 @@ scope mqtt int32 deferred = 0; // INIT only (TODO: move to DATA frame) uint8 qos = 0; uint8 flags = 0; + uint16 packetId = 0; int32 expiryInterval = -1; string16 contentType = null; MqttPayloadFormat format = NONE; @@ -234,6 +239,12 @@ scope mqtt union MqttFlushEx switch (uint8) extends 
core::stream::Extension { case 1: mqtt::stream::MqttSubscribeFlushEx subscribe; + case 2: mqtt::stream::MqttSessionFlushEx session; + } + + struct MqttSessionFlushEx + { + uint16 packetId = 0; } struct MqttSubscribeFlushEx @@ -250,11 +261,5 @@ scope mqtt INCOMPLETE(1) } - struct MqttOffsetMetadata - { - uint8 version = 1; - uint8 length; - int16[length] packetIds; - } } } diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.protocol.version.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.protocol.version.yaml new file mode 100644 index 0000000000..e6a4eeba3e --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.protocol.version.yaml @@ -0,0 +1,27 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +--- +name: test +bindings: + net0: + type: mqtt + kind: server + options: + versions: + - v5 + routes: + - exit: app0 diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.validator.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.validator.yaml index bcd24794b3..72f42032be 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.validator.yaml +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.validator.yaml @@ -44,6 +44,7 @@ bindings: topics: - name: sensor/one content: - type: test + model: test + length: 13 routes: - exit: app0 diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json index f5594c002e..d0c51afcbd 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json @@ -36,6 +36,16 @@ { "properties": { + "versions": + { + "title": "Versions", + "type": "array", + "items": + { + "type": "string", + "enum": [ "v3.1.1", "v5" ] + } + }, "authorization": { "title": "Authorizations", @@ -125,7 +135,7 @@ }, "content": { - "$ref": "#/$defs/validator/type" + "$ref": "#/$defs/validator" } } } diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt index b771dafd37..6364266fa4 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt index b5555172b2..26e83a2ab5 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt @@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt index 88889bceaa..c40045ddeb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt @@ -31,7 +31,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .session() .flags("CLEAN_START") .expiry(0) - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(50) .clientId("client-1") .build() diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt index 12aed590e7..dfdbce6c48 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt @@ -33,7 +33,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .flags("CLEAN_START") .expiry(0) - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(50) .clientId("client-1") .build() diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt index ed7f767334..dd1f786658 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt @@ -30,7 
+30,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(50) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt index 315bdcb778..0ad5bd74ac 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(50) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt index c8ed292659..aab12b50c1 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt index 7492f9ab3e..3a405bdfc9 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt index 365e9bb357..6d28a51db8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt @@ -30,7 +30,7 @@ read 
zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt index e3b9ad7f5f..61743454bb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt index 4902ad1d9b..0c064accae 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt index bac2170456..d5ea02814f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt index dc21bb3d5d..ec6e3a4ef7 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt index f94abcde9e..417c798e8a 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt index 152b4defb9..bff7b12332 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt index 9efec7dc01..27c0e9894a 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/client.rpt index 609e09c1ec..ee3aedb394 100644 --- 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/server.rpt index a7718f5940..ede4b66df7 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.10k/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt index 9aff694e50..e4095349c7 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt index bda65ca1bd..b2aa6b4baa 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt index f72e494334..135c2b1627 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt index c4e8bd3cbf..3fe24c4d94 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt @@ -33,7 +33,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt index bd3c03ccb5..3e0da6e53e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt index 1985238c24..43a600bab2 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt index 78c686fb6e..e01510fff9 100644 --- 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt index 5a00d9a593..a978b46735 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt index 17b4d208ce..d75da00da8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt index af8fb2059f..8e3d8d2955 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -87,7 +87,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) 
.session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client2") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt index 8d8a2ff12c..0b815f902c 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt index dbe9ebc13d..2c6bdfc015 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt index db91f65323..c8da04d36f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt index 2f9653d000..d7021f1b9d 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/client.rpt index 5b4629bd7b..61890af08e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -42,6 +42,20 @@ connected read zilla:data.empty read notify RECEIVED_SESSION_STATE +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + connect await RECEIVED_SESSION_STATE "zilla://streams/app0" diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/server.rpt index 3b23dfdc5b..fb46b4ceb8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.mixture.qos/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -44,6 +44,21 @@ connected write zilla:data.empty write flush +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/client.rpt index f7fca6e853..8aa78bb727 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() 
.typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-1") @@ -89,7 +89,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-2") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/server.rpt index e7dddbbc99..7f30cac1d1 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.clients/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-1") @@ -59,7 +59,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-2") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/client.rpt index e74dacf5a2..1ebe11fb55 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/server.rpt index 5a04de02e6..cf35fd4e73 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages.timeout/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt index 5006c59363..4ce978cd51 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt index 14fc69b797..0f5527bd63 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/client.rpt index f15914b23e..3e3389fa33 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/server.rpt index 946782638d..4ee206b05f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.properties/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/client.rpt index 901bfc458c..fc1613e1fc 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/server.rpt index 710c97ed02..951caddb35 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos1.dup.after.puback/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/client.rpt index 5b56e99d4f..7c5d6cf006 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -42,6 +42,20 @@ connected read zilla:data.empty read notify RECEIVED_SESSION_STATE +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + connect await RECEIVED_SESSION_STATE "zilla://streams/app0" diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/server.rpt index 1e5d66822a..d45a074fbc 100644 --- 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.ack.with.reasoncode/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -44,6 +44,21 @@ connected write zilla:data.empty write flush +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/client.rpt index 5b56e99d4f..7c5d6cf006 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -42,6 +42,20 @@ connected read zilla:data.empty read notify RECEIVED_SESSION_STATE +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + connect await RECEIVED_SESSION_STATE "zilla://streams/app0" diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/server.rpt index 8f8700feb0..474b3e7e64 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.no.dupicate.before.pubrel/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -44,6 +44,21 @@ connected write zilla:data.empty write flush +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() 
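The publish.mixture.qos, publish.qos2.ack.with.reasoncode, and publish.qos2.no.dupicate.before.pubrel hunks above, and the new publish.qos2.recovery pair that follows, all add the same session-level flush exchange: when the QoS 2 PUBREL arrives ("# Triggered by PUBREL" in the new client script), the client stream advises a flush carrying the packet id of the in-flight publish, and the server echoes the same flush back to release that packet id. A minimal sketch of the client side, taken directly from these hunks (the server side mirrors it: read advised first, then write advise):

write advise zilla:flush ${mqtt:flushEx()
                              .typeId(zilla:id("mqtt"))
                              .session()
                              .packetId(1)
                              .build()
                              .build()}

read advised zilla:flush ${mqtt:flushEx()
                               .typeId(zilla:id("mqtt"))
                               .session()
                               .packetId(1)
                               .build()
                               .build()}

In the new publish.qos2.recovery scripts, the in-flight packet ids (1, 2, 3) are also replayed into the session begin extension via repeated .packetId(...) calls, so the recovered session starts with those ids already reserved.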
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/client.rpt new file mode 100644 index 0000000000..bdeea89b10 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/client.rpt @@ -0,0 +1,58 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .subscribeQosMax(2) + .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .packetId(1) + .packetId(2) + .packetId(3) + .build() + .build()} + +connected + +read zilla:data.empty + +# Triggered by PUBREL +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/server.rpt new file mode 100644 index 0000000000..754c2746d2 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.qos2.recovery/server.rpt @@ -0,0 +1,63 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .subscribeQosMax(2) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .packetId(1) + .packetId(2) + .packetId(3) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .session() + .packetId(1) + .build() + .build()} + + + diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/client.rpt index 0bb0fb4f0a..0f2c175c72 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/server.rpt index c7bed4e1ce..afb88e0716 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.large.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt index df76616097..57aa676b9d 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt index 58cb238a7b..544de8fc3b 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(0) + .subscribeQosMax(0) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt index c7fbd6eb15..e91bb67055 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt index 6a569d5c82..75b2922373 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt index 9212801e9c..af4572e6ae 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client")
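Aside from that flush exchange, every other hunk in this stretch is the same one-line rename in the MQTT session begin extension, presumably to make explicit that the advertised QoS ceiling applies to subscribes rather than to the session as a whole (values as used throughout these scripts):

-    .qosMax(2)
+    .subscribeQosMax(2)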
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt index 5b3acac928..4e9f74acdb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/client.rpt index 03703f0489..174476d2d5 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/server.rpt index e98bd7250e..53f718055e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.subscribe.batched/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/client.rpt index 5d145d66d2..c320d8da63 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/client.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/server.rpt index 3740651d71..7da3fc82e9 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.unroutable/server.rpt @@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/client.rpt index 92ba7e6735..ee091b154c 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/server.rpt index d07e0738d8..5d56222518 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.valid.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt index 7375f857b6..1ec36e0807 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt index 462f3d08af..c4c13f0315 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt index c0d527d820..1b08738955 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt index 84b66de991..e53051d4bb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt index c52c4f3dea..6965c126ab 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) 
.session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt index d3925f6b6e..c7581f5668 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt index c3f946b238..12238e3d1e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -94,7 +94,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt index a258abfbfc..0e92c5b8aa 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt @@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -89,7 +89,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff 
--git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt
index 665d70433e..5510810418 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
@@ -95,7 +95,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt
index e5d9fa5a7d..3c74c7e45b 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
@@ -93,7 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt
index d3980ce8ec..706fe66a81 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt
index 62685691f2..76ca3601f9 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/client.rpt
index fdb49465b2..4a9638e8da 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/client.rpt
@@ -31,7 +31,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/server.rpt
index bd2eb8ff17..5dd753719d 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.authorization/server.rpt
@@ -33,7 +33,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt
index 30750fea4a..1d9ca21058 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt
@@ -31,7 +31,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .session()
       .flags("CLEAN_START")
       .expiry(30)
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-1")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt
index 1357014205..5596eb6ee0 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt
@@ -33,7 +33,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
       .flags("CLEAN_START")
       .expiry(30)
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-1")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/client.rpt
new file mode 100644
index 0000000000..346b2b59d3
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/client.rpt
@@ -0,0 +1,41 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+connect "zilla://streams/app0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+      .clientId("client")
+      .build()
+    .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+      .flags("CLEAN_START")
+      .subscribeQosMax(2)
+      .packetSizeMax(66560)
+      .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS", "REDIRECT")
+      .clientId("client")
+      .build()
+    .build()}
+
+connected
+
+read zilla:data.empty
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/server.rpt
new file mode 100644
index 0000000000..f34fba7b7f
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.redirect.support/server.rpt
@@ -0,0 +1,44 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+accept "zilla://streams/app0"
+    option zilla:window 8192
+    option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+      .clientId("client")
+      .build()
+    .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+    .typeId(zilla:id("mqtt"))
+    .session()
+      .flags("CLEAN_START")
+      .subscribeQosMax(2)
+      .packetSizeMax(66560)
+      .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS", "REDIRECT")
+      .clientId("client")
+      .build()
+    .build()}
+
+connected
+
+write zilla:data.empty
+write flush
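The two new `session.connect.redirect.support` scripts above capture both halves of the same handshake: the server side writes a session `begin` extension that advertises `"REDIRECT"` among its capabilities, and the client side matches it field for field. Every other hunk in this section is the same mechanical rename of one builder call in the `mqtt` begin-extension DSL, `qosMax` to `subscribeQosMax`. A minimal sketch of the renamed matcher as these scripts now use it (indentation illustrative, values taken from the `session.connect` scripts):

```
# matcher for the session begin extension after the rename
# (this field was previously built as .qosMax(2))
read zilla:begin.ext ${mqtt:matchBeginEx()
    .typeId(zilla:id("mqtt"))
    .session()
      .flags("CLEAN_START")
      .subscribeQosMax(2)
      .packetSizeMax(66560)
      .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
      .clientId("client")
      .build()
    .build()}
```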
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt
index 66bf8e17ec..b515e8cc74 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt
index d6679679de..447a459379 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt
@@ -33,7 +33,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .session()
       .flags("CLEAN_START")
       .expiry(1)
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt
index a8cb8971ee..d4e95f8d07 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt
index 62685691f2..76ca3601f9 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt
index cdfbf8a24c..a084768629 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
@@ -99,7 +99,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt
index 991300b37e..7d5cd23034 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
@@ -93,7 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt
index 6b7b0ceec6..d758fcb964 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt
index ec97e6429e..98bb0363f3 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt
index e4e3a14673..738bd0d94d 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt
index 20e60a236f..36f807ed77 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/client.rpt
index 39b659f0e0..3763d572a6 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/server.rpt
index c8af42e786..73b79fb79b 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.publish/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt
index 08ea0b183e..e96d972a90 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt
index 4e3e92d69f..38607b3e67 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt
index 90137a930e..9819844bca 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt
index c14f65f6a6..517aedb68e 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt
index 2ebe37fed5..ff262c1ac9 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt
index 8fcf49a343..5e795bd2bb 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt
index 389b218e61..05fbfda3d0 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt
index c6c5002735..1d03c4dbbd 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt
index d41cdfa57d..b29fabab6b 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt
index 985adbda51..57f1238b19 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt
index 6e5c555867..d937db9857 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt
index d4b61821c8..2784d15424 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt
index 50208fb289..bfe3e5967c 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt
index 17011ced85..614473660a 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt
index d1bee0322d..1d2eeb3cc2 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt
index fc39b40d0c..f8a2ed3549 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt
@@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt
index 5cc4d09b24..47f831e755 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt
index 3c5d0b4ff6..2f7e60007a 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt
index b6130c77ac..c2a4f7ec48 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt
index d2c62d1ad3..68d9488e9e 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/client.rpt
index 4a152d86dc..45dd0d19f3 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/server.rpt
index 223664f487..5a880179c4 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.10k/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt
index 2facc6a8a7..a9abc78bf6 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt
index af34a7f078..5e421d77c5 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt
index b12bdaf584..2fc07513ff 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt
index 0623c3adfa..3f2a2bc32b 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt
index 49a8c2b24c..1b2fd00874 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt
@@ -46,7 +46,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(0)
+      .subscribeQosMax(0)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
       .clientId("one")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt
index a189ab5691..21def7c9b1 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt
@@ -48,7 +48,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("WILL", "CLEAN_START")
-      .qosMax(0)
+      .subscribeQosMax(0)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
       .clientId("one")
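The `session.will.message.retain` pair above is the one place in this section where the renamed field carries a value other than 2: the server side advertises a subscribe QoS ceiling of 0, and its capabilities list omits `"SHARED_SUBSCRIPTIONS"`. A minimal sketch of that variant on the server side (indentation illustrative, values as in those scripts):

```
# server-side session begin extension when subscribe QoS is capped at 0
write zilla:begin.ext ${mqtt:beginEx()
    .typeId(zilla:id("mqtt"))
    .session()
      .flags("WILL", "CLEAN_START")
      .subscribeQosMax(0)
      .packetSizeMax(66560)
      .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS")
      .clientId("one")
      .build()
    .build()}
```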
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt
index 2b7df6d001..872fca805e 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt
index 68efdb3ecf..9052346f06 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
index 4bdff3a5f6..feb314aed0 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
index 639fd94f52..7b1d2cb205 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt
index 345092dba3..b65ba0d319 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt
index c71af4e41d..36eb537daf 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt
index e667b900af..d0b232f0e6 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt
index da2b521046..c71a476fe2 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt
index c0bc2759ca..cb77b476c5 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt
index cddcae8682..160afffa8a 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt
index ea318306af..5d57863666 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt
@@ -31,7 +31,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-1")
@@ -91,7 +91,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-2")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt
index 755712b684..f7e3a2ac38 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-1")
@@ -82,7 +82,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client-2")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt
index 0c193373c8..9c0502ce98 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt
@@ -31,7 +31,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client1")
@@ -91,7 +91,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client2")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt
index 2c5e931635..4020f80cfc 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client1")
@@ -82,7 +82,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client2")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
index 0f18d42f0c..800eb6a018 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
index 03f34496ea..ce3e3fbb98 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard.mixed.qos/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt
index 57b9d982c7..c7b6da24a5 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt
index 960ab1b1e1..f29007652c 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/client.rpt
index bdb0c80613..0aa2419763 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/server.rpt
index f54a65c0b8..f61539f25a 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos1/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/client.rpt
index 16e7377343..2b3db0347b 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/server.rpt
index 4d757bf89e..30597f6e5d 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos0.published.qos2/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/client.rpt
index 76b277efeb..b595b01da0 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/server.rpt
index f3faaab253..9d2e1f587e 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1.published.qos2/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/client.rpt
index 38fe077bf3..bffd7e51f4 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/client.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/client.rpt
@@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/server.rpt
index c7059330d5..689832c23c 100644
--- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/server.rpt
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos1/server.rpt
@@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx()
     .typeId(zilla:id("mqtt"))
     .session()
       .flags("CLEAN_START")
-      .qosMax(2)
+      .subscribeQosMax(2)
       .packetSizeMax(66560)
       .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
       .clientId("client")
diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/client.rpt index f2ead70a57..5948168442 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/server.rpt index 2a0712e505..84124b3024 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.qos2/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt index b61bc0f9f4..787b2fb4a8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt index 2ce1853bdb..6f716cf77c 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", 
"SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt index e98a052a22..faea365a10 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt index a9e75a71ef..7739bfae9d 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/client.rpt index 4ae46c8f26..46afb2d16c 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/server.rpt index 6f080adc52..15205cf5ed 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.mixture.qos/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + 
.subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt index 23f8c0df6a..7ef648bd5f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt index d91fcf9c07..a0f6aa0b3e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt index 9606660cb6..403a6e7a25 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt index be569d49e7..cd91e9b8aa 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/client.rpt index 470a1c9ac3..5b9ad149cc 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -127,7 +127,7 @@ write zilla:begin.ext ${mqtt:beginEx() read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/server.rpt index 043cc582c7..651f17b6c6 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos1.unacked.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -118,7 +118,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt index 78558f2300..f31fe43584 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -155,7 +155,7 @@ write zilla:begin.ext ${mqtt:beginEx() read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt index 95a23a20d3..26780e4619 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.incomplete.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -143,7 +143,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt index 65994cc20c..dcecdc7da5 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -146,7 +146,7 @@ write zilla:begin.ext ${mqtt:beginEx() read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt index 77a5fdd975..8f812311d1 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.replay.qos2.unreceived.message/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") @@ -134,7 +134,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt index 23f920f17a..502e0151bf 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt index 3167305bbe..bc9e682451 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt index 4a57d637db..707ad620fb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt index dbb15859eb..52b3a8ace6 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt index b3ba629994..d9cee97b78 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt index fffe14919d..b5d041e6a6 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/client.rpt index 78f539ab4c..1aacd9180b 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/server.rpt index 90a55c5b9a..ce5d89ac72 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1.v4/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/client.rpt index a43531b2f4..78c5e58c4f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/server.rpt index f443af16c4..21dafd084b 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos1/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", 
"SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/client.rpt index cb17fffa99..ac1479c5ca 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/server.rpt index 69081b70c3..e39b71bfb4 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2.v4/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/client.rpt index dcb5b0e6aa..f36bbedf63 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/server.rpt index ccd38c90da..a96406d0c2 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.replay.retained.message.qos2/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext 
${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt index fda2157214..5b18f30548 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt index c2e06f5318..93abfceef8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt index b951e2bb3a..6c12edcbba 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt index cc1cd82e72..c1d3e835c9 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index 1490553b89..c15989b169 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index 282d76ed86..b46a12553f 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt index 7c1e2dfd88..03c0811b59 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt index 2466432741..4ac4e900e5 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt index f335981ee2..ee7d7777ca 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt index bf02ff67e7..ed5194aaa6 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt index 16b2c4eb6a..6f704f0c80 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) 
.capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt index aa8d721d09..7077b12957 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt index e278ab0b80..afd5bd3a8c 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt index 9c08aa337f..c4e4b38be2 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index 0097dac0d8..50fc6e9339 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index 3e9fdd654e..7f0628afb0 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt index 7acbbffd8c..8c5f5a7a3a 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt index 52fc4d003f..a5462a8520 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt index db6e337a9e..a0a9bba262 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt index 60a9dd2299..464d8e3a18 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt index 38baf06562..3e6743dbc4 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt index b4443804fb..44a700fe23 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) 
.packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 2a9725217a..05484623c7 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index 9e67c51a43..cf1f5a0200 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt index 80715eb608..08f2000cc3 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt index a895e567ba..83c2080681 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt +++ 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt index de67a971f1..bed9a5becb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt index 55a060d9f6..048aedf5e3 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/client.rpt index e2bd0f7a94..b55f589b79 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/client.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/server.rpt index ab045dc44b..b5ba625e7e 100644 --- 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.unroutable/server.rpt @@ -31,7 +31,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt index aa530f336d..7a56c44233 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt index 6ac847b2ef..bbce97e4c3 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index 71995c567e..ab43377785 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index bdaa42e5c4..8417c7e658 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt index 59f3936622..42b63bb4bb 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt index 44d9254d3e..07399f3a0e 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt index 084eaf6b68..6ba9da3bb0 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git 
a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt index 5227e3d6b5..b94f10e6b6 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt index dee38fef29..1e0bc70078 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt @@ -30,7 +30,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt index 78a6d6f6b7..8623dc2504 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt @@ -32,7 +32,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .flags("CLEAN_START") - .qosMax(2) + .subscribeQosMax(2) .packetSizeMax(66560) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/client.rpt new file mode 100644 index 0000000000..eaaf8a7f4a --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x04] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x84] # reason = unsupported protocol version + [0x00] # properties + +read closed diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/server.rpt new file mode 100644 index 0000000000..2498d71962 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/connect.unsupported.protocol.version/server.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x04] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x84] # reason = unsupported protocol version + [0x00] # properties + +write close diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/client.rpt new file mode 100644 index 0000000000..207794b986 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/client.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties + +write [0x62 0x04] # PUBREL + [0x00 0x01] # packet id = 1 + [0x00] # reason code + [0x00] # properties + +read [0x70 0x03] # PUBCOMP + [0x00 0x01] # packet id = 1 + [0x00] # reason code diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/server.rpt new file mode 100644 index 0000000000..83feb8fe4f --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/publish.qos2.recovery/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +read [0x62 0x04] # PUBREL + [0x00 0x01] # packet id = 1 + [0x00] # reason code + [0x00] # properties + +write [0x70 0x03] # PUBCOMP + [0x00 0x01] # packet id = 1 + [0x00] # reason code diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java index 24db95d5e8..83266c4128 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java +++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java @@ -34,7 +34,7 @@ public class SchemaTest .schemaPatch("io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/guard/test.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") - .schemaPatch("io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json") .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config"); @Ignore("TODO") @@ -90,6 +90,14 @@ public void shouldValidateServer() assertThat(config, not(nullValue())); } + @Test + public void shouldValidateServerProtocolVersion() + { + JsonObject config = schema.validate("server.protocol.version.yaml"); + + assertThat(config, not(nullValue())); + } + @Test public void shouldValidateServerWithAuthorizationOptions() { diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index 1fbdbd3129..ca08c29173 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -23,11 +23,8 @@ import java.nio.ByteBuffer; import java.util.Objects; -import java.util.function.IntConsumer; -import org.agrona.BitUtil; import org.agrona.DirectBuffer; -import org.agrona.collections.IntArrayList; import org.agrona.concurrent.UnsafeBuffer; import org.junit.Test; import org.kaazing.k3po.lang.el.BytesMatcher; @@ -40,7 +37,6 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttOffsetMetadataFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; public class MqttFunctionsTest @@ -68,10 +64,12 @@ public void shouldEncodeMqttSessionBeginExt() .session() .flags("WILL", "CLEAN_START") .expiry(30) - .qosMax(1) + .subscribeQosMax(1) + .publishQosMax(1) .packetSizeMax(100) 
.capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") + .packetId(1) .build() .build(); @@ -81,10 +79,12 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); assertEquals(30, mqttBeginEx.session().expiry()); - assertEquals(1, mqttBeginEx.session().qosMax()); + assertEquals(1, mqttBeginEx.session().subscribeQosMax()); + assertEquals(1, mqttBeginEx.session().publishQosMax()); assertEquals(100, mqttBeginEx.session().packetSizeMax()); assertEquals(7, mqttBeginEx.session().capabilities()); assertEquals(6, mqttBeginEx.session().flags()); + assertEquals(1, mqttBeginEx.session().packetIds().nextInt()); } @Test @@ -298,10 +298,12 @@ public void shouldMatchSessionBeginExtension() throws Exception .session() .flags("CLEAN_START") .expiry(10) - .qosMax(1) + .subscribeQosMax(1) + .publishQosMax(1) .packetSizeMax(100) .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") + .packetId(1) .build() .build(); @@ -313,10 +315,12 @@ public void shouldMatchSessionBeginExtension() throws Exception .session(s -> s .flags(2) .expiry(10) - .qosMax(1) + .subscribeQosMax(1) + .publishQosMax(1) .packetSizeMax(100) .capabilities(7) - .clientId("client")) + .clientId("client") + .appendPacketIds((short) 1)) .build(); assertNotNull(matcher.match(byteBuf)); @@ -799,6 +803,7 @@ public void shouldMatchPublishDataExtension() throws Exception .deferred(100) .qos("AT_MOST_ONCE") .flags("RETAIN") + .packetId(1) .expiryInterval(20) .contentType("message") .format("TEXT") @@ -818,6 +823,7 @@ public void shouldMatchPublishDataExtension() throws Exception p.deferred(100); p.qos(0); p.flags(1); + p.packetId(1); p.expiryInterval(20); p.contentType("message"); p.format(f -> f.set(MqttPayloadFormat.TEXT)); @@ -895,6 +901,7 @@ public void shouldEncodeMqttPublishDataEx() .typeId(0) .publish() .deferred(100) + .packetId(1) .expiryInterval(15) .contentType("message") .format("TEXT") @@ -909,6 +916,7 @@ public void shouldEncodeMqttPublishDataEx() assertEquals(0, mqttPublishDataEx.typeId()); assertEquals(100, mqttPublishDataEx.publish().deferred()); + assertEquals(1, mqttPublishDataEx.publish().packetId()); assertEquals(15, mqttPublishDataEx.publish().expiryInterval()); assertEquals("message", mqttPublishDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttPublishDataEx.publish().format().toString()); @@ -1197,6 +1205,43 @@ public void shouldEncodeMqttSubscribeFlushExOffsetCommit() assertEquals(1, mqttFlushEx.subscribe().state()); } + @Test + public void shouldEncodeMqttSessionFlushEx() + { + final byte[] array = MqttFunctions.flushEx() + .typeId(0) + .session() + .packetId(1) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttFlushExFW mqttFlushEx = new MqttFlushExFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(0, mqttFlushEx.typeId()); + assertEquals(1, mqttFlushEx.session().packetId()); + } + + @Test + public void shouldMatchMqttPublishFlushEx() throws Exception + { + BytesMatcher matcher = MqttFunctions.matchFlushEx() + .session() + .packetId(1) + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new MqttFlushExFW.Builder() + .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x00) + .session(p -> p.packetId(1)) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + @Test public void shouldEncodeMqttSubscribeFlushExChangeFilter() { @@ -1244,6 +1289,8 @@ public void 
shouldEncodeMqttSessionState() .subscription("sensor/one", 1, "AT_LEAST_ONCE", "SEND_RETAINED") .subscriptionWithReasonCode("sensor/two", 1, 0) .subscription("sensor/three", 1, "EXACTLY_ONCE", "SEND_RETAINED") + .subscription("sensor/four", 1) + .subscription("sensor/five") .build(); DirectBuffer buffer = new UnsafeBuffer(array); @@ -1270,24 +1317,15 @@ public void shouldEncodeMqttSessionState() 1 == f.subscriptionId() && 2 == f.qos() && 0b0001 == f.flags())); - } - @Test - public void shouldEncodeMqttOffsetMetadata() - { - final String state = MqttFunctions.metadata() - .metadata(1) - .metadata(2) - .build(); - - final IntArrayList metadataList = new IntArrayList(); - UnsafeBuffer buffer = new UnsafeBuffer(BitUtil.fromHex(state)); - MqttOffsetMetadataFW offsetMetadata = new MqttOffsetMetadataFW().wrap(buffer, 0, buffer.capacity()); - offsetMetadata.packetIds().forEachRemaining((IntConsumer) metadataList::add); + assertNotNull(sessionState.subscriptions() + .matchFirst(f -> + "sensor/four".equals(f.pattern().asString()) && + 1 == f.subscriptionId())); - assertEquals(1, offsetMetadata.version()); - assertEquals(1, (int) metadataList.get(0)); - assertEquals(2, (int) metadataList.get(1)); + assertNotNull(sessionState.subscriptions() + .matchFirst(f -> + "sensor/five".equals(f.pattern().asString()))); } @Test diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java index ee228dbeb5..4e170660ec 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java +++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java @@ -226,6 +226,15 @@ public void shouldPublishQoS2MessageAckWithReasoncode() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/publish.qos2.recovery/client", + "${app}/publish.qos2.recovery/server"}) + public void shouldReleaseQos2PacketIdDuringRecovery() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/publish.mixture.qos/client", diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index a52134e1da..46b6f84e6b 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -46,6 +46,15 @@ public void shouldConnect() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/session.connect.redirect.support/client", + "${app}/session.connect.redirect.support/server"}) + public void shouldConnectSupportSharding() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/session.connect.authorization/client", diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/ConnectionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/ConnectionIT.java index bcf8ad959d..7f74cfcbcd 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/ConnectionIT.java +++ 
b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/ConnectionIT.java @@ -118,6 +118,15 @@ public void shouldRejectInvalidProtocolVersion() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/connect.unsupported.protocol.version/client", + "${net}/connect.unsupported.protocol.version/server"}) + public void shouldRejectUnsupportedProtocolVersion() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/connect.invalid.flags/client", diff --git a/specs/binding-proxy.spec/pom.xml b/specs/binding-proxy.spec/pom.xml index c6f7d19d0c..abff640752 100644 --- a/specs/binding-proxy.spec/pom.xml +++ b/specs/binding-proxy.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-sse-kafka.spec/pom.xml b/specs/binding-sse-kafka.spec/pom.xml index 4dc62bdda7..b50e120414 100644 --- a/specs/binding-sse-kafka.spec/pom.xml +++ b/specs/binding-sse-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-sse.spec/pom.xml b/specs/binding-sse.spec/pom.xml index 16c7441ae4..dc814dac5f 100644 --- a/specs/binding-sse.spec/pom.xml +++ b/specs/binding-sse.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-tcp.spec/pom.xml b/specs/binding-tcp.spec/pom.xml index 7b3bcfc125..a0c8743116 100644 --- a/specs/binding-tcp.spec/pom.xml +++ b/specs/binding-tcp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-tls.spec/pom.xml b/specs/binding-tls.spec/pom.xml index 1a985603a2..9fd2a309f7 100644 --- a/specs/binding-tls.spec/pom.xml +++ b/specs/binding-tls.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.2.yaml b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.2.yaml new file mode 100644 index 0000000000..2ba1cbe646 --- /dev/null +++ b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.2.yaml @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +--- +name: test +vaults: + bridge: + type: filesystem + options: + keys: + store: stores/server/keys + type: pkcs12 + password: generated + trust: + store: stores/client/trust + type: pkcs12 + password: generated +bindings: + app0: + type: tls + kind: client + vault: bridge + options: + version: TLSv1.2 + sni: + - localhost + trust: + - serverca + exit: net0 + net0: + type: tls + kind: server + vault: bridge + options: + keys: + - localhost + exit: app1 diff --git a/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.3.yaml b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.3.yaml new file mode 100644 index 0000000000..64dd7dded0 --- /dev/null +++ b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/bridge.tls1.3.yaml @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +--- +name: test +vaults: + bridge: + type: filesystem + options: + keys: + store: stores/server/keys + type: pkcs12 + password: generated + trust: + store: stores/client/trust + type: pkcs12 + password: generated +bindings: + app0: + type: tls + kind: client + vault: bridge + options: + version: TLSv1.3 + sni: + - localhost + trust: + - serverca + exit: net0 + net0: + type: tls + kind: server + vault: bridge + options: + keys: + - localhost + exit: app1 diff --git a/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/server.keys.not.found.yaml b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/server.keys.not.found.yaml new file mode 100644 index 0000000000..16f38c838f --- /dev/null +++ b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/config/server.keys.not.found.yaml @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +--- +name: test +vaults: + server: + type: filesystem + options: + keys: + store: stores/server/keys + type: pkcs12 + password: generated +bindings: + net0: + type: tls + kind: server + vault: server + options: + keys: + - not.found + exit: app0 diff --git a/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/client.rpt b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/client.rpt new file mode 100644 index 0000000000..bb384069d4 --- /dev/null +++ b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/client.rpt @@ -0,0 +1,24 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +connected + +write close +read closed diff --git a/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/server.rpt b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/server.rpt new file mode 100644 index 0000000000..1c5c46ca51 --- /dev/null +++ b/specs/binding-tls.spec/src/main/scripts/io/aklivity/zilla/specs/binding/tls/streams/bridge/handshake/server.rpt @@ -0,0 +1,27 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property address "zilla://streams/app1" + +accept ${address} + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +connected + +read closed +write close diff --git a/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/config/SchemaTest.java b/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/config/SchemaTest.java index ae7d8206c6..16333bb48e 100644 --- a/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/config/SchemaTest.java +++ b/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/config/SchemaTest.java @@ -131,6 +131,14 @@ public void shouldValidateServerAlpn() assertThat(config, not(nullValue())); } + @Test + public void shouldValidateServerKeysNotFound() + { + JsonObject config = schema.validate("server.keys.not.found.yaml"); + + assertThat(config, not(nullValue())); + } + @Test public void shouldValidateServerMutual() { diff --git a/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/stream/BridgeIT.java b/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/stream/BridgeIT.java new file mode 100644 index 0000000000..70c654fb78 --- /dev/null +++ b/specs/binding-tls.spec/src/test/java/io/aklivity/zilla/specs/binding/tls/stream/BridgeIT.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.specs.binding.tls.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class BridgeIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("bridge", "io/aklivity/zilla/specs/binding/tls/streams/bridge"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${bridge}/handshake/client", + "${bridge}/handshake/server"}) + @ScriptProperty("address \"zilla://streams/app0\"") + public void shouldHandshake() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-ws.spec/pom.xml b/specs/binding-ws.spec/pom.xml index cfe1c92f49..902466602a 100644 --- a/specs/binding-ws.spec/pom.xml +++ b/specs/binding-ws.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/engine.spec/pom.xml b/specs/engine.spec/pom.xml index d811f06a3f..ca9a43044f 100644 --- a/specs/engine.spec/pom.xml +++ b/specs/engine.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json index f7b936d80c..069578e870 100644 --- a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json +++ b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/binding/test.schema.patch.json @@ -33,8 +33,14 @@ }, "options": { + "title": "Options", + "type": "object", "properties": { + "value": + { + "$ref": "#/$defs/converter" + }, "port": { "title": "Port", diff --git a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json index fe1b532914..0406d3975f 100644 --- a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json +++ b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json @@ -31,9 +31,17 @@ { "properties": { + "subject": + { + "type": "string" + }, "schema": { "type": "string" + }, + "id": + { + "type": "integer" } }, "additionalProperties": false diff --git a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/engine.schema.json b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/engine.schema.json index b8dfd697c1..b1ef9202ab 100644 --- a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/engine.schema.json +++ b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/engine.schema.json @@ -222,6 +222,87 @@ [ ] }, + "cataloged": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + 
"version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + }, "binding": { "title": "Binding", @@ -233,6 +314,22 @@ "title": "Vault", "type": "string" }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "$ref": "#/$defs/cataloged" + } + } + }, + "maxProperties": 1 + }, "type": { "title": "Type", @@ -363,136 +460,50 @@ [ ] }, - "validator": + "converter": { - "type": + "type": "object", + "properties": { - "oneOf": - [ - { - "$ref": "#/$defs/validator/types" - }, - { - "type": "object", - "properties": - { - "type": - { - "$ref": "#/$defs/validator/types" - }, - "encoding": - { - "type": "string", - "enum": - [ - "utf_8" - ] - }, - "catalog": - { - "type": "object", - "patternProperties": - { - "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": - { - "type": "array", - "items": - { - "$ref": "#/$defs/validator/schema" - } - } - } - } - }, - "additionalProperties": false - } - ] + "model": + { + "$ref": "#/$defs/converter/types" + } }, + "required": + [ + "model" + ], + "allOf": + [ + ], "types": { "type": "string", "enum": [] + } + }, + "validator": + { + "type": "object", + "properties": + { + "model": + { + "$ref": "#/$defs/validator/types" + } }, - "schema": + "required": + [ + "model" + ], + "allOf": + [ + ], + "types": { - "oneOf": - [ - { - "type": "object", - "properties": - { - "id": - { - "type": "integer" - } - }, - "required": - [ - "id" - ], - "additionalProperties": false - }, - { - "type": "object", - "properties": - { - "schema": - { - "type": "string" - }, - "version": - { - "type": "string", - "default": "latest" - } - }, - "required": - [ - "schema" - ], - "additionalProperties": false - }, - { - "type": "object", - "properties": - { - "strategy": - { - "type": "string" - }, - "version": - { - "type": "string", - "default": "latest" - } - }, - "required": - [ - "strategy" - ], - "additionalProperties": false - }, - { - "type": "object", - "properties": - { - "subject": - { - "type": "string" - }, - "version": - { - "type": "string", - "default": "latest" - } - }, - "required": - [ - "subject" - ], - "additionalProperties": false - } - ] + "type": "string", + "enum": [] } } } diff --git a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json new file mode 100644 index 0000000000..8fb9fd98ff --- /dev/null +++ b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json @@ -0,0 +1,270 @@ +[ + { + "op": "add", + "path": "/$defs/converter/types/enum/-", + "value": "test" + }, + { + "op": "add", + "path": "/$defs/converter/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "test" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "test" + }, + "length": + { + "type": "integer" + }, + "capability": + { + "type": "string" + }, + "catalog": 
+ { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + } + } + }, + { + "op": "add", + "path": "/$defs/validator/types/enum/-", + "value": "test" + }, + { + "op": "add", + "path": "/$defs/validator/allOf/-", + "value": + { + "if": + { + "properties": + { + "model": + { + "const": "test" + } + } + }, + "then": + { + "properties": + { + "model": + { + "const": "test" + }, + "length": + { + "type": "integer" + }, + "capability": + { + "type": "string" + }, + "catalog": + { + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "type": "array", + "items": + { + "oneOf": + [ + { + "type": "object", + "properties": + { + "id": + { + "type": "integer" + } + }, + "required": + [ + "id" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "schema": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "schema" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "strategy": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "strategy" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": + { + "subject": + { + "type": "string" + }, + "version": + { + "type": "string", + "default": "latest" + } + }, + "required": + [ + "subject" + ], + "additionalProperties": false + } + ] + } + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + } + } + } +] diff --git a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json b/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json deleted file mode 100644 index 1ccacfa8e3..0000000000 --- a/specs/engine.spec/src/main/scripts/io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "add", - "path": "/$defs/validator/types/enum/-", - "value": "test" - } -] diff --git a/specs/engine.spec/src/test/java/io/aklivity/zilla/specs/engine/config/SchemaTest.java b/specs/engine.spec/src/test/java/io/aklivity/zilla/specs/engine/config/SchemaTest.java index e9ab69d838..c4da35cab3 100644 --- a/specs/engine.spec/src/test/java/io/aklivity/zilla/specs/engine/config/SchemaTest.java +++ b/specs/engine.spec/src/test/java/io/aklivity/zilla/specs/engine/config/SchemaTest.java @@ -33,7 +33,7 @@ public class SchemaTest 
.schemaPatch("io/aklivity/zilla/specs/engine/schema/exporter/test.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/guard/test.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/metrics/test.schema.patch.json") - .schemaPatch("io/aklivity/zilla/specs/engine/schema/validator/test.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/model/test.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/vault/test.schema.patch.json") .schemaPatch("io/aklivity/zilla/specs/engine/schema/catalog/test.schema.patch.json") .configurationRoot("io/aklivity/zilla/specs/engine/config"); diff --git a/specs/exporter-prometheus.spec/pom.xml b/specs/exporter-prometheus.spec/pom.xml index e9626041f2..02d957b95c 100644 --- a/specs/exporter-prometheus.spec/pom.xml +++ b/specs/exporter-prometheus.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/guard-jwt.spec/pom.xml b/specs/guard-jwt.spec/pom.xml index 4d79951dcf..7c09c353cf 100644 --- a/specs/guard-jwt.spec/pom.xml +++ b/specs/guard-jwt.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/metrics-grpc.spec/pom.xml b/specs/metrics-grpc.spec/pom.xml index 66f8c3b2f2..988702112f 100644 --- a/specs/metrics-grpc.spec/pom.xml +++ b/specs/metrics-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/metrics-http.spec/pom.xml b/specs/metrics-http.spec/pom.xml index 5fd71753a3..172d8f4bd9 100644 --- a/specs/metrics-http.spec/pom.xml +++ b/specs/metrics-http.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/metrics-stream.spec/pom.xml b/specs/metrics-stream.spec/pom.xml index 3d3b154ddb..f06fb250f7 100644 --- a/specs/metrics-stream.spec/pom.xml +++ b/specs/metrics-stream.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/pom.xml b/specs/pom.xml index 49227d6f7e..7dc0fc7cc5 100644 --- a/specs/pom.xml +++ b/specs/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - 0.9.66 + 0.9.67 ../pom.xml diff --git a/specs/vault-filesystem.spec/pom.xml b/specs/vault-filesystem.spec/pom.xml index d9715f36f5..41b384fed6 100644 --- a/specs/vault-filesystem.spec/pom.xml +++ b/specs/vault-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - 0.9.66 + 0.9.67 ../pom.xml