From b239bbfc4c93132688e4deef341afae6096c72e8 Mon Sep 17 00:00:00 2001 From: Rohit Sanjay Date: Fri, 11 Oct 2024 00:27:40 -0700 Subject: [PATCH] Implement Kafka REST from scratch (#74) * Add openapi generation and implement basic endpoint * Implement remaining endpoints * add api spec * Add cluster api and integration tests * fix null authorizedOperations * Add metadata and related urls to response * Introduce AdminClients to cache AdminClient by connection id and cluster id * refactor * broken wide * wip: needs refactor * works again * use constant var * Add tests for cluster API * Support confluent local using internal kafka rest * revert file changes * make util cass final * Add docstrings * fix * Reduce AdminClient request timeout to 10 seconds and handle exception * Use Caffeine to do time-based eviction on AdminClient instances * remove unused import * remove TODO * Put unrelated openapi spec changes back in place * Add exception mapper for UnsupportedOperationException * Set AdminClient timeout to 3 seconds * Implement Kafka REST from scratch * Add openapi generation and implement basic endpoint * Implement remaining endpoints * add api spec * Add cluster api and integration tests * fix null authorizedOperations * Add metadata and related urls to response * Introduce AdminClients to cache AdminClient by connection id and cluster id * refactor * broken wide * wip: needs refactor * works again * use constant var * Add tests for cluster API * Support confluent local using internal kafka rest * revert file changes * make util cass final * Add docstrings * fix * Reduce AdminClient request timeout to 10 seconds and handle exception * Use Caffeine to do time-based eviction on AdminClient instances * remove unused import * remove TODO * Put unrelated openapi spec changes back in place * Add exception mapper for UnsupportedOperationException * Wait until kafka-rest has started up * Hide internal kafka routes * Incorporate PR feedback * Fix graalvm/caffeine 
compilation * revert openapi files * remove newline * Add exception mapper for kafka ApiException * Set content-length only if transfer-encoding is not set * Support camelCase includeAuthorizedOperations query param in list topics * remove unused params * revert processProxyResponse changes * fix wording * Address reviewer feedback --- kafka-rest.openapi.yaml | 4545 +++++++++++++++++ pom.xml | 56 + .../restapi/cache/AdminClients.java | 53 + .../idesidecar/restapi/cache/Clients.java | 140 + .../restapi/cache/ClusterCache.java | 17 + .../restapi/cache/SchemaRegistryClients.java | 13 + .../restapi/exceptions/ExceptionMappers.java | 83 + .../filters/ConnectionIdHeaderFilter.java | 64 + .../restapi/kafkarest/ClusterManager.java | 15 + .../restapi/kafkarest/ClusterManagerImpl.java | 171 + .../restapi/kafkarest/ClusterV3ApiImpl.java | 25 + .../restapi/kafkarest/RelationshipUtil.java | 107 + .../restapi/kafkarest/TopicManager.java | 23 + .../restapi/kafkarest/TopicManagerImpl.java | 168 + .../restapi/kafkarest/TopicV3ApiImpl.java | 50 + .../messageviewer/SchemaRegistryClients.java | 99 - .../ConfluentCloudConsumeStrategy.java | 2 +- .../ConfluentLocalConsumeStrategy.java | 2 +- .../restapi/models/graph/KafkaCluster.java | 3 +- .../processors/ClusterProxyProcessor.java | 1 - .../ConfluentLocalKafkaClusterStrategy.java | 76 +- .../resources/ClusterRestProxyResource.java | 11 +- .../idesidecar/restapi/util/MutinyUtil.java | 21 + .../caffeine/reflect-config.json | 26 + src/main/resources/application.yml | 17 + .../SchemaRegistryClientsTest.java | 6 +- .../kafkarest/api/ClusterV3ApiImplIT.java | 103 + .../kafkarest/api/KafkaRestTestBed.java | 75 + .../kafkarest/api/TopicV3ApiImplIT.java | 141 + .../messageviewer/SimpleConsumerIT.java | 4 +- .../strategy/ClusterStrategyTest.java | 24 +- ...onfluentLocalKafkaClusterStrategyTest.java | 39 +- .../ClusterRestProxyResourceTest.java | 154 - ...luentLocalKafkaWithRestProxyContainer.java | 4 +- 
.../restapi/util/ConfluentLocalTestBed.java | 53 +- .../idesidecar/restapi/util/KafkaTestBed.java | 73 + 36 files changed, 6091 insertions(+), 373 deletions(-) create mode 100644 kafka-rest.openapi.yaml create mode 100644 src/main/java/io/confluent/idesidecar/restapi/cache/AdminClients.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/cache/Clients.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClients.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/filters/ConnectionIdHeaderFilter.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManager.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManagerImpl.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterV3ApiImpl.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/RelationshipUtil.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManager.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManagerImpl.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicV3ApiImpl.java delete mode 100644 src/main/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClients.java create mode 100644 src/main/java/io/confluent/idesidecar/restapi/util/MutinyUtil.java create mode 100644 src/main/resources/META-INF/native-image/com.github.ben-manes.caffeine/caffeine/reflect-config.json rename src/test/java/io/confluent/idesidecar/restapi/{messageviewer => cache}/SchemaRegistryClientsTest.java (96%) create mode 100644 src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/ClusterV3ApiImplIT.java create mode 100644 src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/KafkaRestTestBed.java create mode 100644 src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/TopicV3ApiImplIT.java create mode 100644 
src/test/java/io/confluent/idesidecar/restapi/util/KafkaTestBed.java diff --git a/kafka-rest.openapi.yaml b/kafka-rest.openapi.yaml new file mode 100644 index 00000000..9fd0449c --- /dev/null +++ b/kafka-rest.openapi.yaml @@ -0,0 +1,4545 @@ +# Taken from https://github.com/confluentinc/kafka-rest/blob/b7b1bba059e878dd28d2dbc5c51d25d3d747bd16/api/v3/openapi.yaml +openapi: 3.0.0 + +info: + title: REST Admin API + version: 3.0.0 + contact: + name: Kafka REST Team + x-api-group: v3 + x-audience: external-public + x-tag-group: Kafka API (%s) + +paths: + /internal/kafka/v3/clusters: + x-audience: component-internal + get: + summary: 'List Clusters' + operationId: listKafkaClusters + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + 'Return a list of known Kafka clusters. Currently both Kafka and Kafka REST + Proxy are only aware of the Kafka cluster pointed at by the + ``bootstrap.servers`` configuration. Therefore only one Kafka cluster will be returned in the + response.' + tags: + - Cluster (v3) + responses: + '200': + $ref: '#/components/responses/ListClustersResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'Get Cluster' + operationId: getKafkaCluster + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the Kafka cluster with the specified ``cluster_id``. 
+ tags: + - Cluster (v3) + responses: + '200': + $ref: '#/components/responses/GetClusterResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/acls:batch: + parameters: + - $ref: '#/components/parameters/ClusterId' + + post: + summary: 'Batch Create ACLs' + operationId: batchCreateKafkaAcls + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Create ACLs. + tags: + - ACL (v3) + requestBody: + $ref: '#/components/requestBodies/BatchCreateAclRequest' + responses: + '201': + description: 'Created' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_CreateAcls' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/acls: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List ACLs' + operationId: getKafkaAcls + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return a list of ACLs that match the search criteria. 
+ tags: + - ACL (v3) + parameters: + - $ref: '#/components/parameters/AclResourceType' + - $ref: '#/components/parameters/AclResourceName' + - $ref: '#/components/parameters/AclPatternType' + - $ref: '#/components/parameters/AclPrincipal' + - $ref: '#/components/parameters/AclHost' + - $ref: '#/components/parameters/AclOperation' + - $ref: '#/components/parameters/AclPermission' + responses: + '200': + $ref: '#/components/responses/SearchAclsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + post: + summary: 'Create an ACL' + operationId: createKafkaAcls + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Create an ACL. + tags: + - ACL (v3) + requestBody: + $ref: '#/components/requestBodies/CreateAclRequest' + responses: + '201': + description: 'Created' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_CreateAcls' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + delete: + summary: 'Delete ACLs' + operationId: deleteKafkaAcls + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Delete the ACLs that match the search criteria. 
+ tags: + - ACL (v3) + parameters: + - $ref: '#/components/parameters/AclResourceTypeRequired' + - $ref: '#/components/parameters/AclResourceName' + - $ref: '#/components/parameters/AclPatternTypeRequired' + - $ref: '#/components/parameters/AclPrincipal' + - $ref: '#/components/parameters/AclHost' + - $ref: '#/components/parameters/AclOperationRequired' + - $ref: '#/components/parameters/AclPermissionRequired' + responses: + '200': + $ref: '#/components/responses/DeleteAclsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_DeleteAcls' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/broker-configs: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List Dynamic Broker Configs' + operationId: listKafkaClusterConfigs + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return a list of dynamic cluster-wide broker configuration parameters for the specified Kafka + cluster. Returns an empty list if there are no dynamic cluster-wide broker configuration parameters. 
+ + tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/ListClusterConfigsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/broker-configs:alter: + parameters: + - $ref: '#/components/parameters/ClusterId' + + post: + summary: 'Batch Alter Dynamic Broker Configs' + operationId: updateKafkaClusterConfigs + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update or delete a set of dynamic cluster-wide broker configuration parameters. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/AlterClusterConfigBatchRequest' + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/broker-configs/{name}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConfigName' + + get: + summary: 'Get Dynamic Broker Config' + operationId: getKafkaClusterConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the dynamic cluster-wide broker configuration parameter specified by ``name``. 
+ tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/GetClusterConfigResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + put: + summary: 'Update Dynamic Broker Config' + operationId: updateKafkaClusterConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update the dynamic cluster-wide broker configuration parameter specified by ``name``. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/UpdateClusterConfigRequest' + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + delete: + summary: 'Reset Dynamic Broker Config' + operationId: deleteKafkaClusterConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Reset the configuration parameter specified by ``name`` to its + default value by deleting a dynamic cluster-wide configuration. 
+ tags: + - Configs (v3) + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List Brokers' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return a list of brokers that belong to the specified + Kafka cluster. + tags: + - Broker (v3) + responses: + '200': + $ref: '#/components/responses/ListBrokersResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/{broker_id}: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/BrokerId' + + get: + summary: 'Get Broker' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the broker specified by ``broker_id``. 
+ tags: + - Broker (v3) + responses: + '200': + $ref: '#/components/responses/GetBrokerResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/-/configs: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List Dynamic Broker Configs' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of dynamic configuration parameters for all the brokers in the given Kafka cluster. + tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/ListBrokerConfigsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/{broker_id}/configs: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/BrokerId' + + get: + summary: 'List Broker Configs' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of configuration parameters that belong to the specified Kafka broker. 
+ tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/ListBrokerConfigsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/{broker_id}/configs:alter: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/BrokerId' + + post: + summary: 'Batch Alter Broker Configs' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update or delete a set of broker configuration parameters. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/AlterBrokerConfigBatchRequest' + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/{broker_id}/configs/{name}: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/BrokerId' + - $ref: '#/components/parameters/ConfigName' + + get: + summary: 'Get Broker Config' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the configuration parameter specified by ``name``. 
+ tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/GetBrokerConfigResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + put: + summary: 'Update Broker Config' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update the configuration parameter specified by ``name``. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/UpdateBrokerConfigRequest' + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + delete: + summary: 'Reset Broker Config' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Reset the configuration parameter specified by ``name`` to its default value. 
+ tags: + - Configs (v3) + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/brokers/{broker_id}/partition-replicas: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/BrokerId' + + get: + summary: 'List Replicas by Broker' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of replicas assigned to the specified broker. + tags: + - Broker (v3) + - Replica (v3) + responses: + '200': + $ref: '#/components/responses/SearchReplicasByBrokerResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List Consumer Groups' + operationId: listKafkaConsumerGroups + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of consumer groups that belong to the specified + Kafka cluster. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/ListConsumerGroupsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + + get: + summary: 'Get Consumer Group' + operationId: getKafkaConsumerGroup + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the consumer group specified by the ``consumer_group_id``. + tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/GetConsumerGroupResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/consumers: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + + get: + summary: 'List Consumers' + operationId: listKafkaConsumers + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return a list of consumers that belong to the specified consumer + group. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/ListConsumersResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/lag-summary: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + + get: + summary: 'Get Consumer Group Lag Summary' + operationId: getKafkaConsumerGroupLagSummary + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) [![Available in dedicated clusters only](https://img.shields.io/badge/-Available%20in%20dedicated%20clusters%20only-%23bc8540)](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#dedicated-cluster) + + Return the maximum and total lag of the consumers belonging to the + specified consumer group. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/GetConsumerGroupLagSummaryResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/lags: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + + get: + summary: 'List Consumer Lags' + operationId: listKafkaConsumerLags + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) [![Available in dedicated clusters only](https://img.shields.io/badge/-Available%20in%20dedicated%20clusters%20only-%23bc8540)](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#dedicated-cluster) + + Return a list of consumer lags of the consumers belonging to the + specified consumer group. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/ListConsumerLagsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/lags/{topic_name}/partitions/{partition_id}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + + get: + summary: 'Get Consumer Lag' + operationId: getKafkaConsumerLag + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) [![Available in dedicated clusters only](https://img.shields.io/badge/-Available%20in%20dedicated%20clusters%20only-%23bc8540)](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#dedicated-cluster) + + Return the consumer lag on a partition with the given `partition_id`. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/GetConsumerLagResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/consumers/{consumer_id}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + - $ref: '#/components/parameters/ConsumerId' + + get: + summary: 'Get Consumer' + operationId: getKafkaConsumer + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the consumer specified by the ``consumer_id``. + tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/GetConsumerResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/consumers/{consumer_id}/assignments: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + - $ref: '#/components/parameters/ConsumerId' + + get: + summary: 'List Consumer Assignments' + operationId: listKafkaConsumerAssignment + description: |- + [![Generally 
Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return a list of partition assignments for the specified consumer. + tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/ListConsumerAssignmentsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/consumer-groups/{consumer_group_id}/consumers/{consumer_id}/assignments/{topic_name}/partitions/{partition_id}: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/ConsumerGroupId' + - $ref: '#/components/parameters/ConsumerId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + + get: + summary: 'Get Consumer Assignment' + operationId: getKafkaConsumerAssignment + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return information about the assignment for the specified consumer + to the specified partition. 
+ tags: + - Consumer Group (v3) + responses: + '200': + $ref: '#/components/responses/GetConsumerAssignmentResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List Topics' + operationId: listKafkaTopics + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of topics that belong to the specified Kafka cluster. + parameters: + # See NOTE in the `IncludeAuthorizedOperationsCamelCase` parameter definition + - $ref: '#/components/parameters/IncludeAuthorizedOperationsCamelCase' + tags: + - Topic (v3) + responses: + '200': + $ref: '#/components/responses/ListTopicsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + post: + summary: 'Create Topic' + operationId: createKafkaTopic + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Create a topic. + Also supports a dry-run mode that only validates whether the topic creation would succeed + if the ``validate_only`` request property is explicitly specified and set to true. 
+ tags: + - Topic (v3) + requestBody: + $ref: '#/components/requestBodies/CreateTopicRequest' + responses: + # returned when dry-run mode is being used and a topic has not been created + '200': + $ref: '#/components/responses/CreateTopicResponse' + # returned in regular mode when a topic has been created + '201': + $ref: '#/components/responses/CreateTopicResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_CreateTopic' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + get: + summary: 'Get Topic' + operationId: getKafkaTopic + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the topic with the given `topic_name`. 
+ tags: + - Topic (v3) + parameters: + - $ref: '#/components/parameters/IncludeAuthorizedOperations' + responses: + '200': + $ref: '#/components/responses/GetTopicResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + patch: + summary: 'Update Partition Count' + operationId: updatePartitionCountKafkaTopic + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Increase the number of partitions for a topic. + tags: + - Topic (v3) + requestBody: + $ref: '#/components/requestBodies/UpdatePartitionCountRequest' + responses: + '200': + $ref: '#/components/responses/GetTopicResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_UpdatePartitionCountTopic' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + delete: + summary: 'Delete Topic' + operationId: deleteKafkaTopic + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Delete the topic with the given `topic_name`. 
+ tags: + - Topic (v3) + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/configs: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + get: + summary: 'List Topic Configs' + operationId: listKafkaTopicConfigs + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of configuration parameters that belong to the specified topic. + tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/ListTopicConfigsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/configs:alter: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + post: + summary: 'Batch Alter Topic Configs' + operationId: updateKafkaTopicConfigBatch + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update or delete a set of topic configuration parameters. 
+ Also supports a dry-run mode that only validates whether the operation would succeed if the + ``validate_only`` request property is explicitly specified and set to true. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/AlterTopicConfigBatchRequest' + responses: + # returned in both regular and dry-run modes + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/configs/{name}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/ConfigName' + + get: + summary: 'Get Topic Config' + operationId: getKafkaTopicConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the configuration parameter with the given `name`. 
+ tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/GetTopicConfigResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + put: + summary: 'Update Topic Config' + operationId: updateKafkaTopicConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Update the configuration parameter with given `name`. + tags: + - Configs (v3) + requestBody: + $ref: '#/components/requestBodies/UpdateTopicConfigRequest' + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + delete: + summary: 'Reset Topic Config' + operationId: deleteKafkaTopicConfig + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Reset the configuration parameter with given `name` to its default value. 
+ tags: + - Configs (v3) + responses: + '204': + description: 'No Content' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + get: + summary: 'List Partitions' + operationId: listKafkaPartitions + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of partitions that belong to the specified topic. + tags: + - Partition (v3) + responses: + '200': + $ref: '#/components/responses/ListPartitionsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions/{partition_id}: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + + get: + summary: 'Get Partition' + operationId: getKafkaPartition + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the partition with the given 
`partition_id`. + tags: + - Partition (v3) + responses: + '200': + $ref: '#/components/responses/GetPartitionResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/-/partitions/-/reassignment: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List All Replica Reassignments' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of all ongoing replica reassignments in the given Kafka cluster. + tags: + - Partition (v3) + responses: + '200': + $ref: '#/components/responses/ListAllReassignmentsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions/-/reassignment: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + get: + summary: 'List Replica Reassignments By Topic' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of ongoing replica 
reassignments for the given topic. + tags: + - Partition (v3) + responses: + '200': + $ref: '#/components/responses/SearchReassignmentsByTopicResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions/{partition_id}/reassignment: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + + get: + summary: 'Get Replica Reassignments' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of ongoing replica reassignments for the given partition. 
+ tags: + - Partition (v3) + responses: + '200': + $ref: '#/components/responses/GetReassignmentResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions/{partition_id}/replicas: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + + get: + summary: 'List Replicas' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of replicas for the specified partition. 
+ tags: + - Replica (v3) + responses: + '200': + $ref: '#/components/responses/ListReplicasResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/partitions/{partition_id}/replicas/{broker_id}: + x-audience: component-internal + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + - $ref: '#/components/parameters/PartitionId' + - $ref: '#/components/parameters/BrokerId' + + get: + summary: 'Get Replica' + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the replica for the specified partition assigned to the specified broker. 
+ tags: + - Replica (v3) + responses: + '200': + $ref: '#/components/responses/GetReplicaResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/-/configs: + parameters: + - $ref: '#/components/parameters/ClusterId' + + get: + summary: 'List All Topic Configs' + operationId: listKafkaAllTopicConfigs + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Return the list of configuration parameters for all topics hosted by the specified + cluster. + tags: + - Configs (v3) + responses: + '200': + $ref: '#/components/responses/ListTopicConfigsResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/records: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + post: + summary: 'Produce Records' + operationId: produceRecord + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Produce records to the given topic, returning delivery reports for each + record produced. 
This API can be used in streaming mode by setting "Transfer-Encoding: + chunked" header. For as long as the connection is kept open, the server will + keep accepting records. Records are streamed to and from the server as Concatenated + JSON. For each record sent to the server, the server will + asynchronously send back a delivery report, in the same order, each with its own + error_code. An error_code of 200 indicates success. The HTTP status code will be HTTP + 200 OK as long as the connection is successfully established. To identify records + that have encountered an error, check the error_code of each delivery report. + + Note that the cluster_id is validated only when running in Confluent + Cloud. + + tags: + - Records (v3) + requestBody: + $ref: '#/components/requestBodies/ProduceRequest' + responses: + '200': + $ref: '#/components/responses/ProduceResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_ProduceRecords' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '413': + $ref: '#/components/responses/RequestEntityTooLargeErrorResponse' + '415': + $ref: '#/components/responses/UnsupportedMediaTypeErrorResponse' + '422': + $ref: '#/components/responses/UnprocessableEntity_ProduceRecord' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + + /internal/kafka/v3/clusters/{cluster_id}/topics/{topic_name}/records:batch: + parameters: + - $ref: '#/components/parameters/ClusterId' + - $ref: '#/components/parameters/TopicName' + + post: + summary: 'Produce Records Batch' + operationId: produceRecordsBatch + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + + Produce a batch of records to the given 
topic, returning delivery reports for each record produced. + To identify records that have encountered an error, check the error_code of each delivery report. + tags: + - Records (v3) + requestBody: + $ref: '#/components/requestBodies/ProduceBatchRequest' + responses: + '207': + $ref: '#/components/responses/ProduceBatchResponse' + '400': + $ref: '#/components/responses/BadRequestErrorResponse_ProduceRecords' + '401': + $ref: '#/components/responses/UnauthorizedErrorResponse' + '403': + $ref: '#/components/responses/ForbiddenErrorResponse' + '404': + $ref: '#/components/responses/NotFoundErrorResponse' + '415': + $ref: '#/components/responses/UnsupportedMediaTypeErrorResponse' + '429': + $ref: '#/components/responses/TooManyRequestsErrorResponse' + '5XX': + $ref: '#/components/responses/ServerErrorResponse' + +components: + parameters: + AclHost: + name: 'host' + description: 'The ACL host.' + in: query + required: false + schema: + type: string + + AclOperation: + name: 'operation' + description: 'The ACL operation.' + in: query + required: false + schema: + $ref: '#/components/schemas/AclOperation' + + AclOperationRequired: + name: 'operation' + description: 'The ACL operation.' + in: query + required: true + schema: + $ref: '#/components/schemas/AclOperation' + + AclPatternType: + name: 'pattern_type' + description: 'The ACL pattern type.' + in: query + required: false + schema: + $ref: '#/components/schemas/AclPatternType' + + AclPatternTypeRequired: + name: 'pattern_type' + description: 'The ACL pattern type.' + in: query + required: true + schema: + $ref: '#/components/schemas/AclPatternType' + + AclPermission: + name: 'permission' + description: 'The ACL permission.' + in: query + required: false + schema: + $ref: '#/components/schemas/AclPermission' + + AclPermissionRequired: + name: 'permission' + description: 'The ACL permission.' 
+ in: query + required: true + schema: + $ref: '#/components/schemas/AclPermission' + + AclPrincipal: + name: 'principal' + description: 'The ACL principal.' + in: query + required: false + schema: + type: string + + AclResourceName: + name: 'resource_name' + description: 'The ACL resource name.' + in: query + required: false + schema: + type: string + + AclResourceType: + name: 'resource_type' + description: 'The ACL resource type.' + in: query + required: false + schema: + $ref: '#/components/schemas/AclResourceType' + + AclResourceTypeRequired: + name: 'resource_type' + description: 'The ACL resource type.' + in: query + required: true + schema: + $ref: '#/components/schemas/AclResourceType' + + BrokerId: + name: 'broker_id' + description: 'The broker ID.' + in: path + required: true + schema: + type: integer + example: 1 + + ClusterId: + name: 'cluster_id' + description: 'The Kafka cluster ID.' + in: path + required: true + schema: + type: string + example: 'cluster-1' + + ConfigName: + name: 'name' + description: 'The configuration parameter name.' + in: path + required: true + schema: + type: string + example: 'compression.type' + + ConsumerGroupId: + name: 'consumer_group_id' + description: 'The consumer group ID.' + in: path + required: true + schema: + type: string + example: 'consumer-group-1' + + ConsumerId: + name: 'consumer_id' + description: 'The consumer ID.' + in: path + required: true + schema: + type: string + example: 'consumer-1' + + IncludeAuthorizedOperations: + name: 'include_authorized_operations' + description: 'Specify if authorized operations should be included in the response.' + in: query + required: false + schema: + type: boolean + + # NOTE: Kafka-rest's OpenAPI spec has a known undocumented inconsistency where + # the list topics operation expects the includeAuthorizedOperations query parameter + # whereas the get topic operation expects the include_authorized_operations query parameter. 
+ # We declare both here in our version of the spec to ensure that it is at least documented. + # The inconsistency has been set in stone for a while and is unlikely to change in the near future. + IncludeAuthorizedOperationsCamelCase: + name: 'includeAuthorizedOperations' + description: 'Specify if authorized operations should be included in the response, camelCase style.' + in: query + required: false + schema: + type: boolean + + PartitionId: + name: 'partition_id' + description: 'The partition ID.' + in: path + required: true + schema: + type: integer + example: 1 + + TopicName: + name: 'topic_name' + description: 'The topic name.' + in: path + required: true + schema: + type: string + example: topic-1 + + schemas: + AbstractConfigData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - name + - is_default + - is_read_only + - is_sensitive + - source + - synonyms + properties: + cluster_id: + type: string + name: + type: string + value: + type: string + nullable: true + is_default: + type: boolean + is_read_only: + type: boolean + is_sensitive: + type: boolean + source: + $ref: '#/components/schemas/ConfigSource' + synonyms: + type: array + items: + $ref: '#/components/schemas/ConfigSynonymData' + + AclData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - resource_type + - resource_name + - pattern_type + - principal + - host + - operation + - permission + properties: + cluster_id: + type: string + resource_type: + $ref: '#/components/schemas/AclResourceType' + resource_name: + type: string + pattern_type: + $ref: '#/components/schemas/AclPatternType' + principal: + type: string + host: + type: string + operation: + $ref: '#/components/schemas/AclOperation' + permission: + $ref: '#/components/schemas/AclPermission' + + AclDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array 
+ items: + $ref: '#/components/schemas/AclData' + + AclOperation: + type: string + x-extensible-enum: + - UNKNOWN + - ANY + - ALL + - READ + - WRITE + - CREATE + - DELETE + - ALTER + - DESCRIBE + - CLUSTER_ACTION + - DESCRIBE_CONFIGS + - ALTER_CONFIGS + - IDEMPOTENT_WRITE + + AclPatternType: + type: string + x-extensible-enum: + - UNKNOWN + - ANY + - MATCH + - LITERAL + - PREFIXED + + AclPermission: + type: string + x-extensible-enum: + - UNKNOWN + - ANY + - DENY + - ALLOW + + AclResourceType: + type: string + enum: + - UNKNOWN + - ANY + - TOPIC + - GROUP + - CLUSTER + - TRANSACTIONAL_ID + - DELEGATION_TOKEN + + AlterConfigBatchRequestData: + type: object + required: + - data + properties: + data: + type: array + items: + type: object + required: + - name + properties: + name: + type: string + value: + type: string + nullable: true + operation: + type: string + x-extensible-enum: + - SET + - DELETE + nullable: true + validate_only: + type: boolean + + AnyValue: + nullable: true + + AuthorizedOperations: + type: array + items: + type: string + x-extensible-enum: + - UNKNOWN + - ALL + - READ + - WRITE + - CREATE + - DELETE + - ALTER + - DESCRIBE + - CLUSTER_ACTION + - DESCRIBE_CONFIGS + - ALTER_CONFIGS + - IDEMPOTENT_WRITE + + BrokerConfigData: + allOf: + - $ref: '#/components/schemas/AbstractConfigData' + - type: object + required: + - broker_id + properties: + broker_id: + type: integer + + BrokerConfigDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/BrokerConfigData' + + BrokerData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - broker_id + - configs + - partition_replicas + properties: + cluster_id: + type: string + broker_id: + type: integer + host: + type: string + nullable: true + port: + type: integer + nullable: true + rack: + type: string + nullable: true + configs: + $ref: 
'#/components/schemas/Relationship' + partition_replicas: + $ref: '#/components/schemas/Relationship' + + BrokerDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/BrokerData' + + ClusterConfigData: + allOf: + - $ref: '#/components/schemas/AbstractConfigData' + - type: object + required: + - config_type + properties: + config_type: + $ref: '#/components/schemas/ClusterConfigType' + + ClusterConfigDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ClusterConfigData' + + ClusterConfigType: + type: string + x-extensible-enum: + - BROKER + + ClusterData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - acls + - brokers + - broker_configs + - consumer_groups + - topics + - partition_reassignments + properties: + cluster_id: + type: string + controller: + $ref: '#/components/schemas/Relationship' + acls: + $ref: '#/components/schemas/Relationship' + brokers: + $ref: '#/components/schemas/Relationship' + broker_configs: + $ref: '#/components/schemas/Relationship' + consumer_groups: + $ref: '#/components/schemas/Relationship' + topics: + $ref: '#/components/schemas/Relationship' + partition_reassignments: + $ref: '#/components/schemas/Relationship' + + ClusterDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ClusterData' + + CreateAclRequestData: + type: object + required: + - resource_type + - resource_name + - pattern_type + - principal + - host + - operation + - permission + properties: + resource_type: + $ref: '#/components/schemas/AclResourceType' + resource_name: + type: string + pattern_type: + $ref: 
'#/components/schemas/AclPatternType' + principal: + type: string + host: + type: string + operation: + $ref: '#/components/schemas/AclOperation' + permission: + $ref: '#/components/schemas/AclPermission' + + CreateAclRequestDataList: + allOf: + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/CreateAclRequestData' + + CreateTopicRequestData: + type: object + required: + - topic_name + properties: + topic_name: + type: string + partitions_count: + type: integer + replication_factor: + type: integer + replicas_assignments: + # This is a map from partition id (string) to array of broker ids (integer) + type: object + additionalProperties: + type: array + items: + type: integer + configs: + type: array + items: + type: object + required: + - name + properties: + name: + type: string + value: + type: string + nullable: true + validate_only: + type: boolean + + ConfigSource: + type: string + x-extensible-enum: + - DYNAMIC_CLUSTER_LINK_CONFIG + - DYNAMIC_TOPIC_CONFIG + - DYNAMIC_BROKER_LOGGER_CONFIG + - DYNAMIC_BROKER_CONFIG + - DYNAMIC_DEFAULT_BROKER_CONFIG + - STATIC_BROKER_CONFIG + - DEFAULT_CONFIG + - UNKNOWN + + ConfigSynonymData: + type: object + required: + - name + - source + properties: + name: + type: string + value: + type: string + nullable: true + source: + $ref: '#/components/schemas/ConfigSource' + + ConsumerAssignmentData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - consumer_group_id + - consumer_id + - topic_name + - partition_id + - partition + - lag + properties: + cluster_id: + type: string + consumer_group_id: + type: string + consumer_id: + type: string + topic_name: + type: string + partition_id: + type: integer + partition: + $ref: '#/components/schemas/Relationship' + lag: + $ref: '#/components/schemas/Relationship' + + ConsumerAssignmentDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + 
required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ConsumerAssignmentData' + + ConsumerData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - consumer_group_id + - consumer_id + - client_id + - assignments + properties: + cluster_id: + type: string + consumer_group_id: + type: string + consumer_id: + type: string + instance_id: + type: string + nullable: true + client_id: + type: string + assignments: + $ref: '#/components/schemas/Relationship' + + ConsumerDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ConsumerData' + + ConsumerGroupData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - consumer_group_id + - is_simple + - partition_assignor + - state + - coordinator + - consumers + - lag_summary + properties: + cluster_id: + type: string + consumer_group_id: + type: string + is_simple: + type: boolean + partition_assignor: + type: string + state: + $ref: '#/components/schemas/ConsumerGroupState' + coordinator: + $ref: '#/components/schemas/Relationship' + consumers: + $ref: '#/components/schemas/Relationship' + lag_summary: + $ref: '#/components/schemas/Relationship' + + ConsumerGroupDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ConsumerGroupData' + + ConsumerGroupState: + type: string + x-extensible-enum: + - UNKNOWN + - PREPARING_REBALANCE + - COMPLETING_REBALANCE + - STABLE + - DEAD + - EMPTY + + ConsumerLagData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - consumer_group_id + - topic_name + - partition_id + - current_offset + - log_end_offset + - lag + - consumer_id + - client_id + properties: + 
cluster_id: + type: string + consumer_group_id: + type: string + topic_name: + type: string + partition_id: + type: integer + current_offset: + type: integer + format: int64 + log_end_offset: + type: integer + format: int64 + lag: + type: integer + format: int64 + consumer_id: + type: string + instance_id: + type: string + nullable: true + client_id: + type: string + + ConsumerLagDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ConsumerLagData' + + ConsumerGroupLagSummaryData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - consumer_group_id + - max_lag_consumer_id + - max_lag_client_id + - max_lag_topic_name + - max_lag_partition_id + - max_lag + - total_lag + - max_lag_consumer + - max_lag_partition + properties: + cluster_id: + type: string + consumer_group_id: + type: string + max_lag_consumer_id: + type: string + max_lag_instance_id: + type: string + nullable: true + max_lag_client_id: + type: string + max_lag_topic_name: + type: string + max_lag_partition_id: + type: integer + max_lag: + type: integer + format: int64 + total_lag: + type: integer + format: int64 + max_lag_consumer: + $ref: '#/components/schemas/Relationship' + max_lag_partition: + $ref: '#/components/schemas/Relationship' + + Error: + type: object + required: + - error_code + - message + properties: + error_code: + type: integer + format: int32 + message: + type: string + nullable: true + + PartitionData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - topic_name + - partition_id + - replicas + - reassignment + properties: + cluster_id: + type: string + topic_name: + type: string + partition_id: + type: integer + leader: + $ref: '#/components/schemas/Relationship' + replicas: + $ref: '#/components/schemas/Relationship' + reassignment: + $ref: 
'#/components/schemas/Relationship' + + PartitionDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/PartitionData' + + ProduceBatchResponse: + description: Response for producing a batch of records. + type: object + properties: + successes: + description: Successful batch results. + type: array + items: + $ref: '#/components/schemas/ProduceBatchResponseSuccessEntry' + failures: + description: Failed batch results. + type: array + items: + $ref: '#/components/schemas/ProduceBatchResponseFailureEntry' + + ProduceBatchResponseFailureEntry: + type: object + required: + - id + - error_code + properties: + id: + description: Batch entry ID. + type: string + minLength: 1 + maxLength: 80 + error_code: + description: Error code. + type: integer + format: int32 + message: + description: Error message. + type: string + + ProduceBatchResponseSuccessEntry: + type: object + required: + - id + properties: + id: + description: Batch entry ID. 
+ type: string + minLength: 1 + maxLength: 80 + cluster_id: + type: string + topic_name: + type: string + partition_id: + type: integer + format: int32 + offset: + type: integer + format: int64 + timestamp: + type: string + format: date-time + nullable: true + key: + $ref: '#/components/schemas/ProduceResponseData' + value: + $ref: '#/components/schemas/ProduceResponseData' + + ProduceResponse: + type: object + required: + - error_code + properties: + error_code: + type: integer + format: int32 + message: + type: string + cluster_id: + type: string + topic_name: + type: string + partition_id: + type: integer + format: int32 + offset: + type: integer + format: int64 + timestamp: + type: string + format: date-time + nullable: true + key: + $ref: '#/components/schemas/ProduceResponseData' + value: + $ref: '#/components/schemas/ProduceResponseData' + + ProduceResponseData: + type: object + required: + - size + - type + properties: + size: + type: integer + format: int64 + type: + type: string + x-extensible-enum: + - BINARY + - JSON + - STRING + - AVRO + - JSONSCHEMA + - PROTOBUF + nullable: true + subject: + type: string + nullable: true + schema_id: + type: integer + nullable: true + schema_version: + type: integer + nullable: true + nullable: true + + ProduceBatchRequest: + description: Request for producing a batch of records. + type: object + required: + - entries + properties: + entries: + type: array + items: + $ref: '#/components/schemas/ProduceBatchRequestEntry' + + ProduceBatchRequestEntry: + type: object + required: + - id + properties: + id: + description: Batch entry ID. 
+ type: string + minLength: 1 + maxLength: 80 + partition_id: + type: integer + nullable: true + format: int32 + headers: + type: array + items: + $ref: '#/components/schemas/ProduceRequestHeader' + key: + $ref: '#/components/schemas/ProduceRequestData' + value: + $ref: '#/components/schemas/ProduceRequestData' + timestamp: + type: string + format: date-time + nullable: true + + ProduceRequest: + type: object + properties: + partition_id: + type: integer + nullable: true + format: int32 + headers: + type: array + items: + $ref: '#/components/schemas/ProduceRequestHeader' + key: + $ref: '#/components/schemas/ProduceRequestData' + value: + $ref: '#/components/schemas/ProduceRequestData' + timestamp: + type: string + format: date-time + nullable: true + + ProduceRequestData: + type: object + properties: + type: + type: string + x-extensible-enum: + - BINARY + - JSON + - STRING + - AVRO + - JSONSCHEMA + - PROTOBUF + subject: + type: string + nullable: true + subject_name_strategy: + type: string + x-extensible-enum: + - TOPIC_NAME + - RECORD_NAME + - TOPIC_RECORD_NAME + nullable: true + schema_id: + type: integer + nullable: true + schema_version: + type: integer + nullable: true + schema: + type: string + nullable: true + data: + $ref: '#/components/schemas/AnyValue' + nullable: true + + ProduceRequestHeader: + type: object + required: + - name + properties: + name: + type: string + value: + type: string + format: byte + nullable: true + + ReassignmentData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - topic_name + - partition_id + - adding_replicas + - removing_replicas + - replicas + properties: + cluster_id: + type: string + topic_name: + type: string + partition_id: + type: integer + adding_replicas: + type: array + items: + type: integer + removing_replicas: + type: array + items: + type: integer + replicas: + $ref: '#/components/schemas/Relationship' + + ReassignmentDataList: + allOf: + - $ref: 
'#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ReassignmentData' + + Relationship: + type: object + required: + - related + properties: + related: + type: string + + ReplicaData: + allOf: + - $ref: '#/components/schemas/Resource' + - type: object + required: + - cluster_id + - topic_name + - partition_id + - broker_id + - is_leader + - is_in_sync + - broker + properties: + cluster_id: + type: string + topic_name: + type: string + partition_id: + type: integer + broker_id: + type: integer + is_leader: + type: boolean + is_in_sync: + type: boolean + broker: + $ref: '#/components/schemas/Relationship' + + ReplicaDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/ReplicaData' + + Resource: + type: object + required: + - kind + - metadata + properties: + kind: + type: string + metadata: + $ref: '#/components/schemas/ResourceMetadata' + + ResourceCollection: + type: object + required: + - kind + - metadata + properties: + kind: + type: string + metadata: + $ref: '#/components/schemas/ResourceCollectionMetadata' + + ResourceCollectionMetadata: + type: object + required: + - self + properties: + self: + type: string + next: + type: string + nullable: true + + ResourceMetadata: + type: object + required: + - self + properties: + self: + type: string + resource_name: + type: string + nullable: true + + TopicConfigData: + allOf: + - $ref: '#/components/schemas/AbstractConfigData' + - type: object + required: + - topic_name + properties: + topic_name: + type: string + + TopicConfigDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/TopicConfigData' + + TopicData: + allOf: + - $ref: 
'#/components/schemas/Resource' + - type: object + required: + - cluster_id + - topic_name + - is_internal + - replication_factor + - partitions_count + - partitions + - configs + - partition_reassignments + properties: + cluster_id: + type: string + topic_name: + type: string + is_internal: + type: boolean + replication_factor: + type: integer + partitions_count: + type: integer + partitions: + $ref: '#/components/schemas/Relationship' + configs: + $ref: '#/components/schemas/Relationship' + partition_reassignments: + $ref: '#/components/schemas/Relationship' + authorized_operations: + $ref: '#/components/schemas/AuthorizedOperations' + + TopicDataList: + allOf: + - $ref: '#/components/schemas/ResourceCollection' + - type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/TopicData' + + UpdateConfigRequestData: + type: object + properties: + value: + type: string + nullable: true + + UpdatePartitionCountRequestData: + type: object + required: + - partitions_count + properties: + partitions_count: + type: integer + format: int32 + + requestBodies: + AlterBrokerConfigBatchRequest: + description: 'The alter broker configuration parameter batch request.' + content: + application/json: + schema: + $ref: '#/components/schemas/AlterConfigBatchRequestData' + example: + data: + - name: 'max.connections' + operation: 'DELETE' + - name: 'compression.type' + value: 'gzip' + + AlterClusterConfigBatchRequest: + description: 'The alter cluster configuration parameter batch request.' + content: + application/json: + schema: + $ref: '#/components/schemas/AlterConfigBatchRequestData' + example: + data: + - name: 'max.connections' + operation: 'DELETE' + - name: 'compression.type' + value: 'gzip' + + AlterTopicConfigBatchRequest: + description: 'The alter topic configuration parameter batch request.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/AlterConfigBatchRequestData' + examples: + batch_alter_topic_configs: + value: + data: + - name: 'cleanup.policy' + operation: 'DELETE' + - name: 'compression.type' + value: 'gzip' + validate_only_batch_alter_topic_configs: + value: + data: + - name: 'cleanup.policy' + operation: 'DELETE' + - name: 'compression.type' + value: 'gzip' + validate_only: true + + CreateAclRequest: + description: 'The ACL creation request.' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAclRequestData' + example: + resource_type: 'CLUSTER' + resource_name: 'kafka-cluster' + pattern_type: 'LITERAL' + principal: 'principalType:principalName' + host: '*' + operation: 'DESCRIBE' + permission: 'DENY' + + BatchCreateAclRequest: + description: 'The batch ACL creation request.' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAclRequestDataList' + example: + data: + - resource_type: 'CLUSTER' + resource_name: 'kafka-cluster' + pattern_type: 'LITERAL' + principal: 'principalType:principalName' + host: '*' + operation: 'DESCRIBE' + permission: 'DENY' + - resource_type: 'TOPIC' + resource_name: 'kafka-cluster' + pattern_type: 'LITERAL' + principal: 'principalType:principalName' + host: '*' + operation: 'READ' + permission: 'ALLOW' + + CreateTopicRequest: + description: 'The topic creation request.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/CreateTopicRequestData' + examples: + uniform_replication: + value: + topic_name: 'topic-X' + partitions_count: 64 + replication_factor: 3 + configs: + - name: 'cleanup.policy' + value: 'compact' + - name: 'compression.type' + value: 'gzip' + explicit_replicas_assignments: + value: + topic_name: 'topic-X' + replicas_assignments: + "0": [1,2] + "1": [2,3] + "2": [3,1] + configs: + - name: 'cleanup.policy' + value: 'compact' + - name: 'compression.type' + value: 'gzip' + dry_run_create_topic: + value: + topic_name: 'topic-X' + partitions_count: 64 + replication_factor: 3 + validate_only: true + + ProduceBatchRequest: + description: 'A batch of records to be produced to Kafka. The delivery reports are sent in the response + in the same order as the records in the request.' + content: + application/json: + schema: + $ref: '#/components/schemas/ProduceBatchRequest' + examples: + string_batch: + description: 'If using type, one of "BINARY", "JSON" or "STRING" is required.' + value: + entries: + - id: "1" + value: + type: 'STRING' + data: 'My first message' + - id: "2" + value: + type: 'STRING' + data: 'My second message' + + ProduceRequest: + description: 'A single record to be produced to Kafka. To produce multiple records in the same + request, simply concatenate the records. The delivery reports are concatenated + in the same order as the records are sent.' + content: + application/json: + schema: + $ref: '#/components/schemas/ProduceRequest' + examples: + binary_and_json: + description: 'If using type, one of "BINARY", "JSON" or "STRING" is required.' 
+ value: + partition_id: 1 + headers: + - name: 'Header-1' + value: 'SGVhZGVyLTE=' + - name: 'Header-2' + value: 'SGVhZGVyLTI=' + key: + type: 'BINARY' + data: 'Zm9vYmFy' + value: + type: 'JSON' + data: {"foo":"bar"} + timestamp: '2021-02-05T19:14:42Z' + binary_and_avro_with_subject_and_raw_schema: + description: 'If using type, one of "BINARY", "JSON", "STRING", or using the schema field is required.' + value: + partition_id: 1 + headers: + - name: 'Header-1' + value: 'SGVhZGVyLTE=' + - name: 'Header-2' + value: 'SGVhZGVyLTI=' + key: + type: 'BINARY' + data: 'Zm9vYmFy' + value: + type: 'AVRO' + subject: 'topic-1-key' + schema: '{\"type\":\"string\"}' + data: 'foobar' + timestamp: '2021-02-05T19:14:42Z' + string: + description: 'If using type, one of "BINARY", "JSON" or "STRING" is required.' + value: + value: + type: 'STRING' + data: 'My message' + schema_id_and_schema_version: + description: 'If not setting type, the record is assumed to use a schema. The actual + schema is queried from Schema Registry based on the subject, schema_id + and schema_version. You can specify the subject directly or you can use + subject_name_strategy. If neither is specified, subject_name_strategy is + assumed to be TOPIC_NAME. You can use either schema_id or schema_version + to identify the actual schema in the subject.' + value: + key: + subject_name_strategy: 'TOPIC_NAME' + schema_id: 1 + data: 1000 + value: + schema_version: 1 + data: + foo: 'bar' + latest_schema: + description: 'If neither schema_id or schema_version are specified, the latest schema + for the subject is used. This should be the preferred way of using the + API. You should register the schema with Schema Registry directly, then + on Kafka REST you do not need to say anything about the schema and we + will fetch the latest one for you.' + value: + key: + data: 1000 + value: + data: 'foobar' + null_and_empty_data: + description: 'data can be omitted or can be null.' 
+ value: + key: + schema_id: 1 + value: + schema_version: 1 + data: null + empty_value: + description: 'key or value can be omitted entirely.' + value: + key: + data: 1000 + + UpdateBrokerConfigRequest: + description: 'The broker configuration parameter update request.' + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateConfigRequestData" + example: + value: 'gzip' + + UpdateClusterConfigRequest: + description: 'The cluster configuration parameter update request.' + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateConfigRequestData" + example: + value: 'gzip' + + UpdateTopicConfigRequest: + description: 'The topic configuration parameter update request.' + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateConfigRequestData" + example: + value: 'gzip' + + UpdatePartitionCountRequest: + description : 'The number of partitions to increase the partition count to.' + content: + application/json: + schema: + $ref: "#/components/schemas/UpdatePartitionCountRequestData" + example: + partitions_count: 10 + + + responses: + CreateTopicResponse: + description: 'The created topic.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/TopicData' + example: + kind: 'KafkaTopic' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-X' + resource_name: 'crn:///kafka=cluster-1/topic=topic-X' + cluster_id: 'cluster-1' + topic_name: 'topic-X' + is_internal: false + replication_factor: 3 + partitions_count: 1 + partitions: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-X/partitions' + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-X/configs' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-X/partitions/-/reassignments' + + DeleteAclsResponse: + description: 'The list of deleted ACLs.' + content: + application/json: + schema: + type: object + required: + - data + properties: + data: + type: array + items: + $ref: '#/components/schemas/AclData' + example: + data: + - kind: 'KafkaAcl' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls?resource_type=TOPIC&resource_name=topic-&pattern_type=PREFIXED&principal=User%3Aalice&host=*&operation=ALL&permission=ALLOW' + cluster_id: 'cluster-1' + resource_type: 'TOPIC' + resource_name: 'topic-' + pattern_type: 'PREFIXED' + principal: 'User:alice' + host: '*' + operation: 'ALL' + permission: 'ALLOW' + - kind: 'KafkaAcl' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls?resource_type=CLUSTER&resource_name=kafka-cluster&pattern_type=LITERAL&principal=User%3Aalice&host=*&operation=DESCRIBE&permission=DENY' + cluster_id: 'cluster-1' + resource_type: 'CLUSTER' + resource_name: 'kafka-cluster' + pattern_type: 'LITERAL' + principal: 'User:alice' + host: '*' + operation: 'DESCRIBE' + permission: 'DENY' + + GetBrokerConfigResponse: + description: 
'The broker configuration parameter.' + content: + application/json: + schema: + $ref: '#/components/schemas/BrokerConfigData' + example: + kind: 'KafkaBrokerConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs/compression.type' + resource_name: 'crn:///kafka=cluster-1/broker=1/config=compression.type' + cluster_id: 'cluster-1' + broker_id: 1 + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_BROKER_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_BROKER_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 'DEFAULT_CONFIG' + + GetBrokerResponse: + description: 'The broker.' + content: + application/json: + schema: + $ref: '#/components/schemas/BrokerData' + example: + kind: 'KafkaBroker' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + resource_name: 'crn:///kafka=cluster-1/broker=1' + cluster_id: 'cluster-1' + broker_id: 1 + host: 'localhost' + port: 9291 + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs' + partition_replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/partition-replicas' + + GetClusterConfigResponse: + description: 'The cluster configuration parameter.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ClusterConfigData' + example: + kind: 'KafkaClusterConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs/compression.type' + resource_name: 'crn:///kafka=cluster-1/broker-config=compression.type' + cluster_id: 'cluster-1' + config_type: 'BROKER' + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 'DEFAULT_CONFIG' + + GetClusterResponse: + description: 'The Kafka cluster.' + content: + application/json: + schema: + $ref: '#/components/schemas/ClusterData' + example: + kind: 'KafkaCluster' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1' + resource_name: 'crn:///kafka=cluster-1' + cluster_id: 'cluster-1' + controller: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + acls: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls' + brokers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers' + broker_configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs' + consumer_groups: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups' + topics: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/-/partitions/-/reassignment' + + GetConsumerAssignmentResponse: + description: 'The consumer group assignment.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerAssignmentData' + example: + kind: 'KafkaConsumerAssignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1/assignment=topic=1/partition=1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + topic_name: 'topic-1' + partition_id: 1 + partition: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1' + lag: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/1' + + GetConsumerGroupResponse: + description: 'The consumer group.' + content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerGroupData' + example: + kind: 'KafkaConsumerGroup' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + is_simple: false + partition_assignor: 'org.apache.kafka.clients.consumer.RoundRobinAssignor' + state: 'STABLE' + coordinator: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + consumers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers' + lag_summary: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lag-summary' + + GetConsumerGroupLagSummaryResponse: + description: 'The max and total consumer lag in a consumer group.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerGroupLagSummaryData' + example: + kind: 'KafkaConsumerGroupLagSummary' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lag-summary' + resource_name: 'crn:///kafka=cluster-1/consumer-groups=consumer-group-1/lag-summary' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + max_lag_consumer_id: 'consumer-1' + max_lag_instance_id: 'consumer-instance-1' + max_lag_client_id: 'client-1' + max_lag_topic_name: 'topic-1' + max_lag_partition_id: 1 + max_lag: 100 + total_lag: 110 + max_lag_consumer: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1' + max_lag_partition: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1' + + GetConsumerLagResponse: + description: 'The consumer lag.' + content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerLagData' + example: + kind: 'KafkaConsumerLag' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + topic_name: 'topic-1' + partition_id: 1 + consumer_id: 'consumer-1' + instance_id: 'consumer-instance-1' + client_id: 'client-1' + current_offset: 1 + log_end_offset: 101 + lag: 100 + + GetConsumerResponse: + description: 'The consumer.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerData' + example: + kind: 'KafkaConsumer' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + instance_id: 'consumer-instance-1' + client_id: 'client-1' + assignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments' + + GetPartitionResponse: + description: 'The partition' + content: + application/json: + schema: + $ref: '#/components/schemas/PartitionData' + example: + kind: 'KafkaPartition' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + leader: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/1' + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + reassignment: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignment' + + GetReassignmentResponse: + description: 'The ongoing replicas reassignments.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ReassignmentData' + example: + kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + adding_replicas: + - 1 + - 2 + removing_replicas: + - 3 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + + GetReplicaResponse: + description: 'The replica.' + content: + application/json: + schema: + $ref: '#/components/schemas/ReplicaData' + example: + kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/replica=1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + broker_id: 1 + is_leader: true + is_in_sync: true + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + + GetTopicConfigResponse: + description: 'The topic configuration parameter.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/TopicConfigData' + example: + kind: 'KafkaTopicConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/compression.type' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/config=compression.type' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_TOPIC_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_TOPIC_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 'DEFAULT_CONFIG' + + GetTopicResponse: + description: 'The topic.' + content: + application/json: + schema: + $ref: '#/components/schemas/TopicData' + example: + kind: 'KafkaTopic' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + is_internal: false + replication_factor: 3 + partitions_count: 1 + partitions: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions' + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/configs' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/-/reassignments' + + ListAllReassignmentsResponse: + description: "The ongoing replicas reassignments." 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ReassignmentDataList' + example: + kind: 'KafkaReassignmentList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/-/partitions/-/reassignment' + next: null + data: + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + adding_replicas: + - 1 + - 2 + removing_replicas: + - 3 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions/2/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-2/partition=2/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-2' + partition_id: 2 + adding_replicas: + - 1 + removing_replicas: + - 2 + - 3 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions/2/replicas' + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions/3/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-3/partition=3/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-3' + partition_id: 3 + adding_replicas: + - 3 + removing_replicas: + - 1 + - 2 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions/3/replicas' + + ListBrokerConfigsResponse: + description: 'The list of broker configs.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/BrokerConfigDataList' + example: + kind: 'KafkaBrokerConfigList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs' + next: null + data: + - kind: 'KafkaBrokerConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs/max.connections' + resource_name: 'crn:///kafka=cluster-1/broker=1/config=max.connections' + cluster_id: 'cluster-1' + broker_id: 1 + name: 'max.connections' + value: '1000' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_BROKER_CONFIG' + synonyms: + - name: 'max.connections' + value: '1000' + source: 'DYNAMIC_BROKER_CONFIG' + - name: 'max.connections' + value: '2147483647' + source: 'DEFAULT_CONFIG' + - kind: 'KafkaBrokerConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs/compression.type' + resource_name: 'crn:///kafka=cluster-1/broker=1/config=compression.type' + cluster_id: 'cluster-1' + broker_id: 1 + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_BROKER_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_BROKER_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 'DEFAULT_CONFIG' + + ListBrokersResponse: + description: 'The list of brokers.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/BrokerDataList' + example: + kind: 'KafkaBrokerList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers' + next: null + data: + - kind: 'KafkaBroker' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + resource_name: 'crn:///kafka=cluster-1/broker=1' + cluster_id: 'cluster-1' + broker_id: 1 + host: 'localhost' + port: 9291 + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/configs' + partition_replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/partition-replicas' + - kind: 'KafkaBroker' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/2' + resource_name: 'crn:///kafka=cluster-1/broker=2' + cluster_id: 'cluster-1' + broker_id: 2 + host: 'localhost' + port: 9292 + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/2/configs' + partition_replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/2/partition-replicas' + - kind: 'KafkaBroker' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/3' + resource_name: 'crn:///kafka=cluster-1/broker=3' + cluster_id: 'cluster-1' + broker_id: 3 + host: 'localhost' + port: 9293 + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/3/configs' + partition_replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/3/partition-replicas' + + ListClusterConfigsResponse: + description: 'The list of cluster configs.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ClusterConfigDataList' + example: + kind: 'KafkaClusterConfigList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs' + next: null + data: + - kind: 'KafkaClusterConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs/max.connections' + resource_name: 'crn:///kafka=cluster-1/broker-config=max.connections' + cluster_id: 'cluster-1' + config_type: 'BROKER' + name: 'max.connections' + value: '1000' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + synonyms: + - name: 'max.connections' + value: '1000' + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + - name: 'max.connections' + value: '2147483647' + source: 'DEFAULT_CONFIG' + - kind: 'KafkaClusterConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs/compression.type' + resource_name: 'crn:///kafka=cluster-1/broker-config=compression.type' + cluster_id: 'cluster-1' + config_type: 'BROKER' + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_DEFAULT_BROKER_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 'DEFAULT_CONFIG' + + ListClustersResponse: + description: 'The list of Kafka clusters.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ClusterDataList' + example: + kind: 'KafkaClusterList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters' + next: null + data: + - kind: 'KafkaCluster' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1' + resource_name: 'crn:///kafka=cluster-1' + cluster_id: 'cluster-1' + controller: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + acls: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls' + brokers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers' + broker_configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/broker-configs' + consumer_groups: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups' + topics: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/-/partitions/-/reassignment' + + ListConsumerAssignmentsResponse: + description: 'The list of consumer group assignments.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerAssignmentDataList' + example: + kind: 'KafkaConsumerAssignmentList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments' + next: null + data: + - kind: 'KafkaConsumerAssignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1/assignment=topic=1/partition=1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + topic_name: 'topic-1' + partition_id: 1 + partition: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1' + lag: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/1' + - kind: 'KafkaConsumerAssignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments/topic-2/partitions/2' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1/assignment=topic=2/partition=2' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + topic_name: 'topic-2' + partition_id: 2 + partition: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions/2' + lag: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-2/partitions/2' + - kind: 'KafkaConsumerAssignment' + metadata: + self: 
'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments/topic-3/partitions/3' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1/assignment=topic=3/partition=3' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + topic_name: 'topic-3' + partition_id: 3 + partition: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions/3' + lag: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-3/partitions/3' + + ListConsumerGroupsResponse: + description: 'The list of consumer groups.' + content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerGroupDataList' + example: + kind: 'KafkaConsumerGroupList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups' + next: null + data: + - kind: 'KafkaConsumerGroup' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + is_simple: false + partition_assignor: 'org.apache.kafka.clients.consumer.RoundRobinAssignor' + state: 'STABLE' + coordinator: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + consumers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers' + lag_summary: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lag-summary' + - kind: 'KafkaConsumerGroup' + metadata: + self: 
'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-2' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-2' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-2' + is_simple: false + partition_assignor: 'org.apache.kafka.clients.consumer.StickyAssignor' + state: 'PREPARING_REBALANCE' + coordinator: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/2' + consumers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-2/consumers' + lag_summary: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-2/lag-summary' + - kind: 'KafkaConsumerGroup' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-3' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-3' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-3' + is_simple: false + partition_assignor: 'org.apache.kafka.clients.consumer.RangeAssignor' + state: 'DEAD' + coordinator: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/3' + consumers: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-3/consumers' + lag_summary: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-3/lag-summary' + + ListConsumerLagsResponse: + description: 'The list of consumer lags.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerLagDataList' + example: + kind: 'KafkaConsumerLagList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags' + next: null + data: + - kind: 'KafkaConsumerLag' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + topic_name: 'topic-1' + partition_id: 1 + consumer_id: 'consumer-1' + instance_id: 'consumer-instance-1' + client_id: 'client-1' + current_offset: 1 + log_end_offset: 101 + lag: 100 + - kind: 'KafkaConsumerLag' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/2' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=2' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + topic_name: 'topic-1' + partition_id: 2 + consumer_id: 'consumer-2' + instance_id: 'consumer-instance-2' + client_id: 'client-2' + current_offset: 1 + log_end_offset: 11 + lag: 10 + - kind: 'KafkaConsumerLag' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/3' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=3' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + topic_name: 'topic-1' + partition_id: 3 + consumer_id: 'consumer-3' + instance_id: 'consumer-instance-3' + client_id: 'client-3' + current_offset: 1 + log_end_offset: 1 + lag: 0 + + ListConsumersResponse: + description: 'The list of consumers.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ConsumerDataList' + example: + kind: 'KafkaConsumerList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers' + next: null + data: + - kind: 'KafkaConsumer' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-1' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-1' + instance_id: 'consumer-instance-1' + client_id: 'client-1' + assignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-1/assignments' + - kind: 'KafkaConsumer' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-2' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-2' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-2' + instance_id: 'consumer-instance-2' + client_id: 'client-2' + assignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-2/assignments' + - kind: 'KafkaConsumer' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-2' + resource_name: 'crn:///kafka=cluster-1/consumer-group=consumer-group-1/consumer=consumer-2' + cluster_id: 'cluster-1' + consumer_group_id: 'consumer-group-1' + consumer_id: 'consumer-2' + instance_id: 'consumer-instance-2' + client_id: 'client-2' + assignments: + related: 
'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/consumer-groups/consumer-group-1/consumers/consumer-2/assignments' + + ListPartitionsResponse: + description: 'The list of partitions.' + content: + application/json: + schema: + $ref: '#/components/schemas/PartitionDataList' + example: + kind: 'KafkaPartitionList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions' + next: null + data: + - kind: 'KafkaPartition' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + leader: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/1' + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + reassignment: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignment' + - kind: 'KafkaPartition' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=2' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 2 + leader: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas/2' + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas' + reassignment: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/reassignment' + - kind: 'KafkaPartition' + metadata: + self: 
'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=3' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 3 + leader: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3/replicas/3' + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3/replicas' + reassignment: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3/reassignment' + + ListReplicasResponse: + description: 'The list of replicas.' + content: + application/json: + schema: + $ref: '#/components/schemas/ReplicaDataList' + example: + kind: 'KafkaReplicaList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + next: null + data: + - kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/replica=1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + broker_id: 1 + is_leader: true + is_in_sync: true + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + - kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/2' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/replica=2' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + broker_id: 2 + is_leader: false + is_in_sync: true + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/2' + - kind: 'KafkaReplica' + 
metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas/3' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/replica=3' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + broker_id: 3 + is_leader: false + is_in_sync: false + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/3' + + ListTopicConfigsResponse: + description: 'The list of cluster configs.' + content: + application/json: + schema: + $ref: '#/components/schemas/TopicConfigDataList' + example: + kind: 'KafkaTopicConfigList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/configs' + next: null + data: + - kind: 'KafkaTopicConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/configs/cleanup.policy' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/config=cleanup.policy' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + name: 'cleanup.policy' + value: 'compact' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_TOPIC_CONFIG' + synonyms: + - name: 'cleanup.policy' + value: 'compact' + source: 'DYNAMIC_TOPIC_CONFIG' + - name: 'cleanup.policy' + value: 'delete' + source: 'DEFAULT_CONFIG' + - kind: 'KafkaTopicConfig' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/configs/compression.type' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/config=compression.type' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + name: 'compression.type' + value: 'gzip' + is_default: false + is_read_only: false + is_sensitive: false + source: 'DYNAMIC_TOPIC_CONFIG' + synonyms: + - name: 'compression.type' + value: 'gzip' + source: 'DYNAMIC_TOPIC_CONFIG' + - name: 'compression.type' + value: 'producer' + source: 
'DEFAULT_CONFIG' + + ListTopicsResponse: + description: 'The list of topics.' + content: + application/json: + schema: + $ref: '#/components/schemas/TopicDataList' + example: + kind: 'KafkaTopicList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics' + next: null + data: + - kind: 'KafkaTopic' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + is_internal: false + replication_factor: 3 + partitions_count: 1 + partitions: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions' + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/configs' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/-/reassignments' + - kind: 'KafkaTopic' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2' + resource_name: 'crn:///kafka=cluster-1/topic=topic-2' + cluster_id: 'cluster-1' + topic_name: 'topic-2' + is_internal: true + replication_factor: 4 + partitions_count: 1 + partitions: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions' + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/configs' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions/-/reassignments' + - kind: 'KafkaTopic' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3' + resource_name: 'crn:///kafka=cluster-1/topic=topic-3' + cluster_id: 
'cluster-1' + topic_name: 'topic-3' + is_internal: false + replication_factor: 5 + partitions_count: 1 + partitions: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions' + configs: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/configs' + partition_reassignments: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions/-/reassignments' + + ProduceBatchResponse: + description: |- + The response containing a delivery report for each record produced to a topic. + A separate delivery report will be returned, in the same order, each with its own error_code. + content: + application/json: + schema: + $ref: '#/components/schemas/ProduceBatchResponse' + examples: + produce_batch_record_success: + description: The records were successfully produced to the topic. + value: + successes: + - id: "1" + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + offset: 0 + timestamp: '2021-02-05T19:14:42Z' + key: + type: BINARY + size: 7 + value: + type: JSON + size: 15 + - id: "2" + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + offset: 1 + timestamp: '2021-02-05T19:14:43Z' + key: + type: BINARY + size: 7 + value: + type: JSON + size: 15 + failures: [] + produce_batch_record_success_and_failure: + description: One record was produced to the topic successfully, and one failed. + value: + successes: + - id: "1" + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + offset: 0 + timestamp: '2021-02-05T19:14:42Z' + key: + type: BINARY + size: 7 + value: + type: JSON + size: 15 + failures: + - id: "2" + error_code: 400 + message: "Bad Request: data=1 is not a base64 string." + + ProduceResponse: + description: |- + The response containing a delivery report for a record produced to a topic. 
In streaming mode, + for each record sent, a separate delivery report will be returned, in the same order, + each with its own error_code. + content: + application/json: + schema: + $ref: '#/components/schemas/ProduceResponse' + examples: + produce_record_success: + description: The record was successfully produced to the topic. + value: + error_code: 200 + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + offset: 0 + timestamp: '2021-02-05T19:14:42Z' + key: + type: BINARY + size: 7 + value: + type: JSON + size: 15 + produce_record_bad_binary_data: + description: Thrown when sending a BINARY value which is not a base64-encoded string. + value: + error_code: 400 + message: "Bad Request: data=1 is not a base64 string." + + SearchAclsResponse: + description: 'The list of ACLs.' + content: + application/json: + schema: + $ref: '#/components/schemas/AclDataList' + example: + kind: 'KafkaAclList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls?principal=User%3Aalice' + data: + - kind: 'KafkaAcl' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls?resource_type=TOPIC&resource_name=topic-&pattern_type=PREFIXED&principal=User%3Aalice&host=*&operation=ALL&permission=ALLOW' + cluster_id: 'cluster-1' + resource_type: 'TOPIC' + resource_name: 'topic-' + pattern_type: 'PREFIXED' + principal: 'User:alice' + host: '*' + operation: 'ALL' + permission: 'ALLOW' + - kind: 'KafkaAcl' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/acls?resource_type=CLUSTER&resource_name=kafka-cluster&pattern_type=LITERAL&principal=User%3Aalice&host=*&operation=DESCRIBE&permission=DENY' + cluster_id: 'cluster-1' + resource_type: 'CLUSTER' + resource_name: 'kafka-cluster' + pattern_type: 'LITERAL' + principal: 'User:alice' + host: '*' + operation: 'DESCRIBE' + permission: 'DENY' + + SearchReassignmentsByTopicResponse: + 
description: "The ongoing replicas reassignments." + content: + application/json: + schema: + $ref: '#/components/schemas/ReassignmentDataList' + example: + kind: 'KafkaReassignmentList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/-/partitions/-/reassignment' + next: null + data: + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=1/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 1 + adding_replicas: + - 1 + - 2 + removing_replicas: + - 3 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas' + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=2/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 2 + adding_replicas: + - 1 + removing_replicas: + - 2 + - 3 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas' + - kind: 'KafkaReassignment' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3/reassignment' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=3/reassignment' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 3 + adding_replicas: + - 3 + removing_replicas: + - 1 + - 2 + replicas: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/3/replicas' + + SearchReplicasByBrokerResponse: + description: 'The list of replicas.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ReplicaDataList' + example: + kind: 'KafkaReplicaList' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1/partition-replicas' + next: null + data: + - kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-1/partition=2/replica=1' + cluster_id: 'cluster-1' + topic_name: 'topic-1' + partition_id: 2 + broker_id: 1 + is_leader: true + is_in_sync: true + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + - kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-2/partitions/3/replicas/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-3/partition=3/replica=1' + cluster_id: 'cluster-1' + topic_name: 'topic-2' + partition_id: 3 + broker_id: 1 + is_leader: false + is_in_sync: true + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + - kind: 'KafkaReplica' + metadata: + self: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/topics/topic-3/partitions/1/replicas/1' + resource_name: 'crn:///kafka=cluster-1/topic=topic-3/partition=1/replica=1' + cluster_id: 'cluster-1' + topic_name: 'topic-3' + partition_id: 1 + broker_id: 1 + is_leader: false + is_in_sync: false + broker: + related: 'https://pkc-00000.region.provider.confluent.cloud/kafka/v3/clusters/cluster-1/brokers/1' + + # Error responses + + BadRequestErrorResponse: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + bad_request_cannot_deserialize: + description: "Thrown when trying to deserialize an integer from non-integer data." + value: + error_code: 400 + message: "Cannot deserialize value of type `java.lang.Integer` from String \"A\": not a valid `java.lang.Integer` value" + + BadRequestErrorResponse_CreateAcls: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + create_acls_cluster_name_invalid: + description: "Thrown when creating an ACL for a CLUSTER resource specifying the wrong resource name." + value: + error_code: 40002 + message: The only valid name for the CLUSTER resource is kafka-cluster" + + BadRequestErrorResponse_CreateTopic: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + create_topic_already_exists: + description: "Thrown when trying to create a topic with a name already used by an existing topic." + value: + error_code: 40002 + message: "Topic 'my-topic' already exists." + create_topic_replication_factor_too_large: + description: "Thrown when trying to create a topic with a replication factor larger than the number of brokers." + value: + error_code: 40002 + message: "Replication factor: 2 larger than available brokers: 1." + + BadRequestErrorResponse_DeleteAcls: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + delete_acls_unspecified_resource_type: + description: "Thrown when trying to delete ACLs without specifying a valid resource type." + value: + error_code: 400 + message: "resource_type cannot be unspecified or UNKNOWN" + + BadRequestErrorResponse_ProduceRecords: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + header_not_base64_encoded: + description: "Thrown when headers in the produce-record are not base64 encoded." + value: + error_code: 400 + message: "Cannot deserialize value of type `byte[]` from String \"\": Unexpected end of base64-encoded String: base64 variant 'MIME-NO-LINEFEEDS' expects padding (one or more '=' characters) at the end. This Base64Variant might have been incorrectly configured" + + UnprocessableEntity_ProduceRecord: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + produce_record_empty_request_body: + description: "Thrown when the request body is empty." + value: + error_code: 422 + message: "Payload error. Request body is empty. Data is required." + + BadRequestErrorResponse_UpdatePartitionCountTopic: + description: 'Indicates a bad request error. It could be caused by an unexpected request + body format or other forms of request validation failure.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + topic_update_partitions_invalid: + description: "Thrown when trying to update the number of partitions incorrectly." + value: + error_code: 40002 + message: "Topic already has 1 partitions." 
+ + UnauthorizedErrorResponse: + description: 'Indicates a client authentication error. Kafka authentication failures will contain + error code 40101 in the response body.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + kafka_authentication_failed: + description: "Thrown when using Basic authentication with wrong Kafka credentials." + value: + error_code: 40101 + message: "Authentication failed" + + ForbiddenErrorResponse: + description: 'Indicates a client authorization error. Kafka authorization failures will contain + error code 40301 in the response body.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + kafka_authorization_failed: + description: "Thrown when the caller is not authorized to perform the underlying operation." + value: + error_code: 40301 + message: "Request is not authorized" + + NotFoundErrorResponse: + description: 'Indicates attempted access to an unreachable or non-existing resource like e.g. an unknown topic + or partition. GET requests to endpoints not allowed in the accesslists will also result in this response.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + endpoint_not_found: + description: "Thrown for generic HTTP 404 errors." + value: + error_code: 404 + message: "HTTP 404 Not Found" + cluster_not_found: + description: "Thrown when using a non-existing cluster ID." + value: + error_code: 404 + message: "Cluster my-cluster cannot be found." + unknown_topic_or_partition: + description: "Thrown when using a non-existing topic name or partition ID." + value: + error_code: 40403 + message: "This server does not host this topic-partition." + + TooManyRequestsErrorResponse: + description: 'Indicates that a rate limit threshold has been reached, and the client should + retry again later.' + content: + text/html: + schema: + type: string + example: + description: "A sample response from Jetty's DoSFilter." 
+ value: ' + + + Error 429 Too Many Requests + + +

HTTP ERROR 429 Too Many Requests

+ + + + + + + + + + + + + + + + + +
URI:/v3/clusters/my-cluster
STATUS:429
MESSAGE:Too Many Requests
SERVLET:default
+ + ' + + RequestEntityTooLargeErrorResponse: + description: 'This implies the client is sending a request payload that is larger than the maximum message size the + server can accept.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + produce_records_expects_json: + description: "Thrown by /records API if payload size exceeds the message max size" + value: + error_code: 413 + message: "The request included a message larger than the maximum message size the server can accept." + + UnsupportedMediaTypeErrorResponse: + description: 'This implies the client is sending the request payload format in an unsupported format.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + produce_records_expects_json: + description: "Thrown by /records API if payload format content-type doesn't match expected application/json" + value: + error_code: 415 + message: "HTTP 415 Unsupported Media Type" + + ServerErrorResponse: + description: 'A server-side problem that might not be addressable from the client side. + Retriable Kafka errors will contain error code 50003 in the response body.' + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + generic_internal_server_error: + description: "Thrown for generic HTTP 500 errors." 
+ value: + error_code: 500 + message: "Internal Server Error" + +tags: + - name: Cluster (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Configs (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Broker (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Replica (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: ACL (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Consumer Group (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Partition (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Topic (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) + - name: Records (v3) + description: |- + [![Generally Available](https://img.shields.io/badge/Lifecycle%20Stage-Generally%20Available-%2345c6e8)](#section/Versioning/API-Lifecycle-Policy) diff --git a/pom.xml b/pom.xml index c638366e..2aed9d9c 100644 --- a/pom.xml +++ b/pom.xml @@ -789,6 +789,62 @@ + + org.openapitools + openapi-generator-maven-plugin + 7.8.0 + + + kafka-rest-spec + generate-sources + 
+ generate + + + kafka-rest.openapi.yaml + jaxrs-spec + false + true + ClusterV3,TopicV3,PartitionV3 + + true + false + quarkus + false + io.confluent.idesidecar.restapi.kafkarest.api + io.confluent.idesidecar.restapi.kafkarest.model + true + true + true + true + false + false + true + + true + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.2.0 + + + add-source + generate-sources + + add-source + + + + ${project.build.directory}/generated-sources/openapi/src/gen/java + + + + + diff --git a/src/main/java/io/confluent/idesidecar/restapi/cache/AdminClients.java b/src/main/java/io/confluent/idesidecar/restapi/cache/AdminClients.java new file mode 100644 index 00000000..fbe173e3 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/cache/AdminClients.java @@ -0,0 +1,53 @@ +package io.confluent.idesidecar.restapi.cache; + +import com.github.benmanes.caffeine.cache.CaffeineSpec; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import java.util.Map; +import java.util.Properties; +import org.apache.kafka.clients.admin.AdminClient; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +/** + * Create an ApplicationScoped bean to cache AdminClient instances by connection ID and client ID. + */ +@ApplicationScoped +public class AdminClients extends Clients { + + // Evict cached AdminClient instances after 5 minutes of inactivity + private static final String CAFFEINE_SPEC = "expireAfterAccess=5m"; + + @Inject + ClusterCache clusterCache; + + @ConfigProperty(name = "ide-sidecar.admin-client-configs") + Map adminClientSidecarConfigs; + + public AdminClients() { + super(CaffeineSpec.parse(CAFFEINE_SPEC)); + } + + /** + * Get an AdminClient for the given connection ID and Kafka cluster ID. + * If the client does not already exist, it will be created. 
+ * @param connectionId The connection ID + * @param clusterId The cluster ID + * @return The AdminClient + */ + public AdminClient getClient(String connectionId, String clusterId) { + return getClient( + connectionId, + clusterId, + () -> AdminClient.create(getAdminClientConfig(connectionId, clusterId)) + ); + } + + private Properties getAdminClientConfig(String connectionId, String clusterId) { + var cluster = clusterCache.getKafkaCluster(connectionId, clusterId); + var props = new Properties(); + // Set AdminClient configs provided by the sidecar + props.putAll(adminClientSidecarConfigs); + props.put("bootstrap.servers", cluster.bootstrapServers()); + return props; + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/cache/Clients.java b/src/main/java/io/confluent/idesidecar/restapi/cache/Clients.java new file mode 100644 index 00000000..b9677452 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/cache/Clients.java @@ -0,0 +1,140 @@ +package io.confluent.idesidecar.restapi.cache; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.CaffeineSpec; +import com.github.benmanes.caffeine.cache.RemovalCause; +import io.confluent.idesidecar.restapi.connections.ConnectionState; +import io.confluent.idesidecar.restapi.events.Lifecycle; +import io.quarkus.logging.Log; +import jakarta.enterprise.event.ObservesAsync; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +/** + * Utilities to obtain and cache clients for a given connection and client ID. + */ +public abstract class Clients { + + /** + * Caffeine spec to use for the client cache. By default, this will be an empty spec. + * See the + * Caffeine Spec + * for more information on the format. Inherited classes may override this method + * and return a different spec. 
+ */ + private final CaffeineSpec caffeineSpec; + + /** + * Store an instance of a Caffeine cache for each connection. The cache will store + * clients by client ID and its policy may be configured by the {@link #caffeineSpec}. + */ + private final Map> clientsByIdByConnections = + new ConcurrentHashMap<>(); + + protected Clients(CaffeineSpec caffeineSpec) { + this.caffeineSpec = caffeineSpec; + } + + protected Clients() { + this(CaffeineSpec.parse("")); + } + + /** + * Get a client for the given connection and client ID. If the client does not + * already exist, it will be created using the provided factory. + * + * @param connectionId the ID of the connection + * @param clientId the identifier of the client + * @param factory the method that will create the client if there is not already one + * @return the client + */ + public T getClient( + String connectionId, + String clientId, + Supplier factory + ) { + return clientsForConnection(connectionId).asMap().computeIfAbsent( + clientId, + k -> factory.get() + ); + } + + private Cache clientsForConnection(String connectionId) { + return clientsByIdByConnections.computeIfAbsent(connectionId, k -> createCache()); + } + + int clientCount() { + return clientsByIdByConnections + .values() + .stream() + .map(Cache::asMap) + .map(Map::size) + .mapToInt(Integer::intValue) + .sum(); + } + + int clientCount(String connectionId) { + return clientsForConnection(connectionId).asMap().size(); + } + + void clearClients(String connectionId) { + var oldCache = clientsByIdByConnections.put(connectionId, createCache()); + if (oldCache != null) { + // Invalidation will trigger the removal listener, which will close the clients + oldCache.invalidateAll(); + } + } + + void handleRemoval(String key, T value, RemovalCause cause) { + try { + if (value != null) { + Log.debugf("Closing client %s", value); + value.close(); + } + } catch (Throwable t) { + Log.debugf("Error closing client %s: %s", value, t); + // Ignore these as we don't care + 
} + } + + private Cache createCache() { + return Caffeine + .from(caffeineSpec) + .removalListener(this::handleRemoval) + .build(); + } + + /** + * Respond to the connection being disconnected by clearing and closing the + * clients that were cached for that connection. + * + * @param connection the connection that was disconnected + */ + void onConnectionDisconnected( + @ObservesAsync @Lifecycle.Disconnected ConnectionState connection + ) { + clearClients(connection.getId()); + } + + /** + * Respond to the connection being deleted by clearing and closing the + * Schema Registry clients that were cached for that connection. + * + * @param connection the connection that was deleted + */ + void onConnectionDeleted(@ObservesAsync @Lifecycle.Deleted ConnectionState connection) { + clearClients(connection.getId()); + } + + /** + * Respond to the connection being updated by clearing and closing the + * clients that were cached for that connection. This ensures that the clients + * don't use stale connection information. 
+ * @param connection the connection that was updated + */ + void onConnectionUpdated(@ObservesAsync @Lifecycle.Updated ConnectionState connection) { + clearClients(connection.getId()); + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/cache/ClusterCache.java b/src/main/java/io/confluent/idesidecar/restapi/cache/ClusterCache.java index eddfd323..3b0a56cc 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/cache/ClusterCache.java +++ b/src/main/java/io/confluent/idesidecar/restapi/cache/ClusterCache.java @@ -25,6 +25,7 @@ import java.time.Duration; import java.util.Deque; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletionException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedDeque; @@ -91,6 +92,22 @@ public KafkaCluster getKafkaCluster(String connectionId, String clusterId) { return forConnection(connectionId).getKafkaCluster(clusterId); } + /** + * Find the first Kafka cluster accessible over the specified connection. This is useful when + * it is known that there is only one Kafka cluster per connection. + * @param connectionId the ID of the connection + * @return the info for the first Kafka cluster, or null if none found + */ + public Optional getKafkaClusterForConnection(String connectionId) { + return forConnection(connectionId) + .kafkaClusters + .values() + .stream() + .findFirst() + .map(ClusterInfo::spec); + } + + /** * Find the cluster info for the schema registry that is associated with the given Kafka cluster, * accessible over the specified connection. 
diff --git a/src/main/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClients.java b/src/main/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClients.java new file mode 100644 index 00000000..023b2ac1 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClients.java @@ -0,0 +1,13 @@ +package io.confluent.idesidecar.restapi.cache; + +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import jakarta.enterprise.context.ApplicationScoped; + +/** + * Create an ApplicationScoped bean to cache SchemaRegistryClient instances + * by connection ID and schema registry client ID. + */ +@ApplicationScoped +public class SchemaRegistryClients extends Clients { + +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/exceptions/ExceptionMappers.java b/src/main/java/io/confluent/idesidecar/restapi/exceptions/ExceptionMappers.java index 74223e86..adb8f111 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/exceptions/ExceptionMappers.java +++ b/src/main/java/io/confluent/idesidecar/restapi/exceptions/ExceptionMappers.java @@ -11,6 +11,10 @@ import jakarta.ws.rs.core.MediaType; import jakarta.ws.rs.core.Response; import jakarta.ws.rs.core.Response.Status; +import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.TopicExistsException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.jboss.resteasy.reactive.server.ServerExceptionMapper; /** @@ -180,4 +184,83 @@ public Response mapInvalidInputException(InvalidInputException exception) { .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) .build(); } + + + @ServerExceptionMapper + public Response mapUnknownTopicException(UnknownTopicOrPartitionException exception) { + var error = io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.NOT_FOUND.getStatusCode()) + 
.message(exception.getMessage()).build(); + return Response + .status(Status.NOT_FOUND) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } + + @ServerExceptionMapper + public Response mapTopicAlreadyExistsException(TopicExistsException exception) { + var error = io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.CONFLICT.getStatusCode()) + .message(exception.getMessage()).build(); + return Response + .status(Status.CONFLICT) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } + + @ServerExceptionMapper + public Response mapClusterNotFoundException(ClusterNotFoundException exception) { + var error = io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.NOT_FOUND.getStatusCode()) + .message(exception.getMessage()).build(); + return Response + .status(Status.NOT_FOUND) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } + + @ServerExceptionMapper + public Response mapKafkaTimeoutException(TimeoutException exception) { + var error = io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.REQUEST_TIMEOUT.getStatusCode()) + .message(exception.getMessage()).build(); + return Response + .status(Status.REQUEST_TIMEOUT) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } + + @ServerExceptionMapper + public Response mapUnsupportedException(UnsupportedOperationException exception) { + var error = io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.NOT_IMPLEMENTED.getStatusCode()) + .message(exception.getMessage()).build(); + return Response + .status(Status.NOT_IMPLEMENTED) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } + + @ServerExceptionMapper + public Response mapApiException(ApiException exception) { + var error 
= io.confluent.idesidecar.restapi.kafkarest.model.Error + .builder() + .errorCode(Status.INTERNAL_SERVER_ERROR.getStatusCode()) + .message(exception.getMessage()).build(); + return Response + .status(Status.INTERNAL_SERVER_ERROR) + .entity(error) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .build(); + } } diff --git a/src/main/java/io/confluent/idesidecar/restapi/filters/ConnectionIdHeaderFilter.java b/src/main/java/io/confluent/idesidecar/restapi/filters/ConnectionIdHeaderFilter.java new file mode 100644 index 00000000..34a24f4f --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/filters/ConnectionIdHeaderFilter.java @@ -0,0 +1,64 @@ +package io.confluent.idesidecar.restapi.filters; + +import static io.confluent.idesidecar.restapi.util.RequestHeadersConstants.CONNECTION_ID_HEADER; + +import io.confluent.idesidecar.restapi.connections.ConnectionStateManager; +import io.confluent.idesidecar.restapi.exceptions.ConnectionNotFoundException; +import io.confluent.idesidecar.restapi.kafkarest.model.Error; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import jakarta.ws.rs.container.ContainerRequestContext; +import jakarta.ws.rs.container.ContainerRequestFilter; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.Response.Status; +import jakarta.ws.rs.ext.Provider; +import java.io.IOException; +import java.util.Optional; + +/** + * Filter to check for the presence of the connection ID header in requests to the internal Kafka + * REST endpoints, and to ensure that the connection ID corresponds to an existing connection. 
+ */ +@Provider +@ApplicationScoped +public class ConnectionIdHeaderFilter implements ContainerRequestFilter { + + @Inject + ConnectionStateManager manager; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + var connectionId = Optional.ofNullable( + requestContext.getHeaderString(CONNECTION_ID_HEADER) + ); + + if (requestContext.getUriInfo().getPath().startsWith("/internal/kafka/v3")) { + if (connectionId.isEmpty()) { + requestContext.abortWith( + Response + .status(Status.BAD_REQUEST) + .entity(Error + .builder() + .errorCode(400) + .message("Missing required header: " + CONNECTION_ID_HEADER).build() + ).build() + ); + } else { + // Check that the connection exists + try { + manager.getConnectionState(connectionId.get()); + } catch (ConnectionNotFoundException e) { + requestContext.abortWith( + Response + .status(Status.NOT_FOUND) + .entity(Error + .builder() + .errorCode(404) + .message("Connection not found: " + connectionId.get()).build() + ).build() + ); + } + } + } + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManager.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManager.java new file mode 100644 index 00000000..e4d1a92e --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManager.java @@ -0,0 +1,15 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterData; +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterDataList; +import io.smallrye.mutiny.Uni; + +/** + * Interface for querying Kafka clusters. See {@link ClusterManagerImpl} for the implementation. 
+ */ +public interface ClusterManager { + + Uni getKafkaCluster(String clusterId); + + Uni listKafkaClusters(); +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManagerImpl.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManagerImpl.java new file mode 100644 index 00000000..ec30f6ea --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterManagerImpl.java @@ -0,0 +1,171 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forAcls; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forAllPartitionReassignments; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forBrokerConfigs; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forBrokers; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forCluster; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forClusters; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forConsumerGroups; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forController; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forTopics; +import static io.confluent.idesidecar.restapi.util.MutinyUtil.uniItem; +import static io.confluent.idesidecar.restapi.util.MutinyUtil.uniStage; +import static io.confluent.idesidecar.restapi.util.RequestHeadersConstants.CONNECTION_ID_HEADER; + +import io.confluent.idesidecar.restapi.cache.AdminClients; +import io.confluent.idesidecar.restapi.cache.ClusterCache; +import io.confluent.idesidecar.restapi.exceptions.ClusterNotFoundException; +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterData; +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterDataList; +import io.confluent.idesidecar.restapi.kafkarest.model.ResourceCollectionMetadata; +import 
io.confluent.idesidecar.restapi.kafkarest.model.ResourceMetadata; +import io.confluent.idesidecar.restapi.models.graph.KafkaCluster; +import io.smallrye.mutiny.Uni; +import io.smallrye.mutiny.infrastructure.Infrastructure; +import io.vertx.core.http.HttpServerRequest; +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; +import java.util.Collection; +import java.util.Optional; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.DescribeClusterResult; +import org.apache.kafka.common.Node; + +/** + * RequestScoped bean for managing Kafka clusters. Creating the bean as {@link RequestScoped} allows + * us to inject the {@link HttpServerRequest} which is used to get the connection ID from the + * request headers. + */ +@RequestScoped +public class ClusterManagerImpl implements ClusterManager { + + @Inject + AdminClients adminClients; + + @Inject + ClusterCache clusterCache; + + @Inject + HttpServerRequest request; + + Supplier connectionId = () -> request.getHeader(CONNECTION_ID_HEADER); + + @Override + public Uni getKafkaCluster(String clusterId) { + return describeCluster(clusterId) + .chain(cid -> { + if (!cid.id().equals(clusterId)) { + return Uni.createFrom().failure(new ClusterNotFoundException( + "Kafka cluster '%s' not found.".formatted(clusterId) + )); + } + return uniItem(cid); + }) + .map(this::fromClusterId); + } + + @Override + public Uni listKafkaClusters() { + return uniItem( + // Get the first Kafka cluster for the connection + (Supplier>) + () -> clusterCache.getKafkaClusterForConnection(connectionId.get())) + // Run the supplier on the default worker pool since it may block + .runSubscriptionOn(Infrastructure.getDefaultWorkerPool()) + .map(Supplier::get) + // Call describeCluster if the cluster info is present + // else return null + .chain(clusterInfo -> clusterInfo + .map(kafkaClusterInfo -> 
this.describeCluster(kafkaClusterInfo.id())) + .orElse(Uni.createFrom().nullItem()) + ) + // Call describeCluster on each cluster ID + .chain(clusterId -> uniItem(getClusterDataList(clusterId))); + } + + private ClusterDataList getClusterDataList(ClusterDescribe cluster) { + return ClusterDataList + .builder() + .metadata(ResourceCollectionMetadata + .builder() + .self(forClusters().getRelated()) + // We don't support pagination + .next(null) + .build() + ) + .kind("KafkaClusterList") + .data(Optional + .ofNullable(cluster) + .stream() + .map(this::fromClusterId) + .collect(Collectors.toList()) + ).build(); + } + + private Uni describeCluster(String clusterId) { + return uniItem((Supplier) + () -> adminClients.getClient(connectionId.get(), clusterId)) + .runSubscriptionOn(Infrastructure.getDefaultWorkerPool()) + .map(supplier -> supplier.get().describeCluster()) + .chain(describeClusterResult -> + uniStage(describeClusterResult.clusterId().toCompletionStage()) + .map(id -> new ClusterDescribe(describeClusterResult).withId(id))) + .chain(cid -> uniStage(cid.result.controller().toCompletionStage()).map(Node::id) + .map(cid::withControllerId) + ).chain(cid -> uniStage(cid.result.nodes().toCompletionStage()) + .map(cid::withNodes) + ); + } + + private ClusterData fromClusterId(ClusterDescribe cluster) { + return ClusterData + .builder() + .kind("KafkaCluster") + .metadata(ResourceMetadata + .builder() + .self(forCluster(cluster.id()).toString()) + // TODO: Construct resource name based on the connection/cluster type + .resourceName(null) + .build() + ) + .clusterId(cluster.id()) + .acls(forAcls(cluster.id())) + .brokerConfigs(forBrokerConfigs(cluster.id())) + .brokers(forBrokers(cluster.id())) + .controller(forController(cluster.id(), cluster.controllerId())) + .consumerGroups(forConsumerGroups(cluster.id())) + .topics(forTopics(cluster.id())) + .partitionReassignments(forAllPartitionReassignments(cluster.id())) + .build(); + } + + /** + * Record to hold the 
KafkaFuture results of the describeCluster operation. + * Used to pass the results between the various stages of the Uni chain. + */ + private record ClusterDescribe( + DescribeClusterResult result, + String id, + Integer controllerId, + Collection nodes + ) { + ClusterDescribe(DescribeClusterResult result) { + this(result, null, null, null); + } + + ClusterDescribe withId(String id) { + return new ClusterDescribe(result, id, controllerId, nodes); + } + + ClusterDescribe withControllerId(Integer controllerId) { + return new ClusterDescribe(result, id, controllerId, nodes); + } + + ClusterDescribe withNodes(Collection nodes) { + return new ClusterDescribe(result, id, controllerId, nodes); + } + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterV3ApiImpl.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterV3ApiImpl.java new file mode 100644 index 00000000..0d186732 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/ClusterV3ApiImpl.java @@ -0,0 +1,25 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import io.confluent.idesidecar.restapi.kafkarest.api.ClusterV3Api; +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterData; +import io.confluent.idesidecar.restapi.kafkarest.model.ClusterDataList; +import io.smallrye.mutiny.Uni; +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; + +@RequestScoped +public class ClusterV3ApiImpl implements ClusterV3Api { + + @Inject + ClusterManagerImpl clusterManager; + + @Override + public Uni getKafkaCluster(String clusterId) { + return clusterManager.getKafkaCluster(clusterId); + } + + @Override + public Uni listKafkaClusters() { + return clusterManager.listKafkaClusters(); + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/RelationshipUtil.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/RelationshipUtil.java new file mode 100644 index 00000000..1088cb00 --- /dev/null +++ 
b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/RelationshipUtil.java @@ -0,0 +1,107 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import io.confluent.idesidecar.restapi.kafkarest.model.Relationship; +import org.eclipse.microprofile.config.ConfigProvider; + +/** + * Utility class for generating {@code related} links for Kafka resources. + */ +public final class RelationshipUtil { + + private RelationshipUtil() { + } + + private static final String SIDECAR_HOST = ConfigProvider.getConfig().getValue( + "ide-sidecar.api.host", String.class); + + public static Relationship forPartitions(String clusterId, String topicId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics/%s/partitions".formatted( + SIDECAR_HOST, clusterId, topicId + )).build(); + } + + public static Relationship forPartitionReassignments(String clusterId, String topicId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics/%s/partitions/-/reassignment".formatted( + SIDECAR_HOST, clusterId, topicId + )).build(); + } + + public static Relationship forAllPartitionReassignments(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics/-/partitions/-/reassignment".formatted( + SIDECAR_HOST, clusterId + )).build(); + } + + public static Relationship forTopicConfigs(String clusterId, String topicId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics/%s/configs".formatted( + SIDECAR_HOST, clusterId, topicId + )).build(); + } + + public static Relationship forController(String clusterId, int brokerId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/brokers/%d".formatted( + SIDECAR_HOST, clusterId, brokerId + )).build(); + } + + public static Relationship forAcls(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/acls".formatted( + SIDECAR_HOST, clusterId + )).build(); 
+ } + + public static Relationship forBrokers(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/brokers".formatted( + SIDECAR_HOST, clusterId + )).build(); + } + + public static Relationship forBrokerConfigs(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/broker-configs".formatted( + SIDECAR_HOST, clusterId + )).build(); + } + + public static Relationship forConsumerGroups(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/consumer-groups".formatted( + SIDECAR_HOST, clusterId + )).build(); + } + + public static Relationship forTopics(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics".formatted( + SIDECAR_HOST, clusterId + )).build(); + } + + public static Relationship forTopic(String clusterId, String topicId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s/topics/%s".formatted( + SIDECAR_HOST, clusterId, topicId + )).build(); + } + + public static Relationship forClusters() { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters".formatted( + SIDECAR_HOST + )).build(); + } + + public static Relationship forCluster(String clusterId) { + return Relationship.builder().related( + "%s/internal/kafka/v3/clusters/%s".formatted( + SIDECAR_HOST, clusterId + )).build(); + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManager.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManager.java new file mode 100644 index 00000000..c4dbbaa4 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManager.java @@ -0,0 +1,23 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import io.confluent.idesidecar.restapi.kafkarest.model.CreateTopicRequestData; +import io.confluent.idesidecar.restapi.kafkarest.model.TopicData; +import 
io.confluent.idesidecar.restapi.kafkarest.model.TopicDataList; +import io.smallrye.mutiny.Uni; + +/** + * Interface for managing Kafka topics. See {@link TopicManagerImpl} for the implementation. + */ +public interface TopicManager { + + Uni createKafkaTopic(String clusterId, + CreateTopicRequestData createTopicRequestData); + + Uni deleteKafkaTopic(String clusterId, String topicName); + + Uni getKafkaTopic( + String clusterId, String topicName, Boolean includeAuthorizedOperations + ); + + Uni listKafkaTopics(String clusterId, Boolean includeAuthorizedOperations); +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManagerImpl.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManagerImpl.java new file mode 100644 index 00000000..3d262145 --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicManagerImpl.java @@ -0,0 +1,168 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forPartitionReassignments; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forPartitions; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forTopic; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forTopicConfigs; +import static io.confluent.idesidecar.restapi.kafkarest.RelationshipUtil.forTopics; +import static io.confluent.idesidecar.restapi.util.MutinyUtil.uniItem; +import static io.confluent.idesidecar.restapi.util.MutinyUtil.uniStage; +import static io.confluent.idesidecar.restapi.util.RequestHeadersConstants.CONNECTION_ID_HEADER; + +import io.confluent.idesidecar.restapi.cache.AdminClients; +import io.confluent.idesidecar.restapi.kafkarest.model.CreateTopicRequestData; +import io.confluent.idesidecar.restapi.kafkarest.model.ResourceCollectionMetadata; +import io.confluent.idesidecar.restapi.kafkarest.model.ResourceMetadata; +import 
io.confluent.idesidecar.restapi.kafkarest.model.TopicData; +import io.confluent.idesidecar.restapi.kafkarest.model.TopicDataList; +import io.smallrye.mutiny.Uni; +import io.vertx.core.http.HttpServerRequest; +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; + +/** + * RequestScoped bean for managing Kafka topics. Creating the bean as {@link RequestScoped} allows + * us to inject the {@link HttpServerRequest} which is used to get the connection ID from the + * request headers. + */ +@RequestScoped +public class TopicManagerImpl implements TopicManager { + + @Inject + AdminClients adminClients; + + @Inject + ClusterManagerImpl clusterManager; + + @Inject + HttpServerRequest request; + + Supplier connectionId = () -> request.getHeader(CONNECTION_ID_HEADER); + + @Override + public Uni createKafkaTopic(String clusterId, + CreateTopicRequestData createTopicRequestData) { + return clusterManager.getKafkaCluster(clusterId) + .chain(ignored -> uniStage( + adminClients + .getClient(connectionId.get(), clusterId) + .createTopics(List.of(new NewTopic( + createTopicRequestData.getTopicName(), + Optional.ofNullable(createTopicRequestData.getPartitionsCount()) + .orElse(1), + Optional.ofNullable(createTopicRequestData.getReplicationFactor()) + .orElse(1).shortValue()) + )).all().toCompletionStage())) + .chain(v -> getKafkaTopic( + clusterId, + createTopicRequestData.getTopicName(), + false + )); + } + + @Override + public Uni deleteKafkaTopic(String clusterId, String topicName) { + return clusterManager.getKafkaCluster(clusterId).chain(ignored -> + uniStage( + adminClients.getClient(connectionId.get(), clusterId) + .deleteTopics(List.of(topicName)) + .all() + 
.toCompletionStage()) + ); + } + + @Override + public Uni getKafkaTopic( + String clusterId, String topicName, Boolean includeAuthorizedOperations + ) { + return clusterManager + .getKafkaCluster(clusterId) + .chain(ignored -> + uniStage( + adminClients + .getClient(connectionId.get(), clusterId) + .describeTopics( + List.of(topicName), getDescribeTopicsOptions(includeAuthorizedOperations)) + .allTopicNames() + .toCompletionStage() + ) + .map(topicDescriptions -> topicDescriptions.values().iterator().next()) + .onItem() + .transform(topicDescription -> fromTopicDescription(clusterId, topicDescription)) + ); + } + + private static DescribeTopicsOptions getDescribeTopicsOptions( + Boolean includeAuthorizedOperations + ) { + return new DescribeTopicsOptions() + .includeAuthorizedOperations( + Optional.ofNullable(includeAuthorizedOperations).orElse(false) + ); + } + + @Override + public Uni listKafkaTopics(String clusterId, Boolean includeAuthorizedOperations) { + return clusterManager.getKafkaCluster(clusterId).chain(ignored -> uniStage( + adminClients + .getClient(connectionId.get(), clusterId).listTopics().names().toCompletionStage() + ).chain(topicNames -> uniStage( + adminClients.getClient(connectionId.get(), clusterId) + .describeTopics(topicNames, getDescribeTopicsOptions(includeAuthorizedOperations)) + .allTopicNames() + .toCompletionStage()) + ).onItem() + .transformToUni(topicDescriptionMap -> uniItem(TopicDataList + .builder() + .kind("KafkaTopicList") + .metadata(ResourceCollectionMetadata + .builder() + .next(null) + .self(forTopics(clusterId).getRelated()) + .build() + ) + .data(topicDescriptionMap + .values() + .stream() + .map(t -> fromTopicDescription(clusterId, t)) + .toList() + ).build()) + )); + } + + private TopicData fromTopicDescription(String clusterId, TopicDescription topicDescription) { + return TopicData + .builder() + .kind("KafkaTopic") + .topicName(topicDescription.name()) + .clusterId(clusterId) + 
.partitionsCount(topicDescription.partitions().size()) + .replicationFactor(topicDescription.partitions().getFirst().replicas().size()) + .isInternal(topicDescription.isInternal()) + .authorizedOperations( + Optional.ofNullable(topicDescription.authorizedOperations()).orElse(Set.of()) + .stream().map(Enum::name).toList() + ) + .partitionReassignments(forPartitionReassignments(clusterId, topicDescription.name())) + .partitions(forPartitions(clusterId, topicDescription.name())) + .configs(forTopicConfigs(clusterId, topicDescription.name())) + .metadata(getTopicMetadata(clusterId, topicDescription.name())).build(); + } + + private ResourceMetadata getTopicMetadata(String clusterId, String topicName) { + return ResourceMetadata + .builder() + // TODO: Construct resource name based on the connection/cluster type + .resourceName(null) + .self(forTopic(clusterId, topicName).getRelated()) + .build(); + } + +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicV3ApiImpl.java b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicV3ApiImpl.java new file mode 100644 index 00000000..e8d7f30d --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/kafkarest/TopicV3ApiImpl.java @@ -0,0 +1,50 @@ +package io.confluent.idesidecar.restapi.kafkarest; + +import io.confluent.idesidecar.restapi.kafkarest.api.TopicV3Api; +import io.confluent.idesidecar.restapi.kafkarest.model.CreateTopicRequestData; +import io.confluent.idesidecar.restapi.kafkarest.model.TopicData; +import io.confluent.idesidecar.restapi.kafkarest.model.TopicDataList; +import io.confluent.idesidecar.restapi.kafkarest.model.UpdatePartitionCountRequestData; +import io.smallrye.mutiny.Uni; +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; + +@RequestScoped +public class TopicV3ApiImpl implements TopicV3Api { + + @Inject + TopicManagerImpl topicManager; + + @Override + public Uni createKafkaTopic( + String clusterId, CreateTopicRequestData 
createTopicRequestData + ) { + return topicManager.createKafkaTopic(clusterId, createTopicRequestData); + } + + @Override + public Uni deleteKafkaTopic(String clusterId, String topicName) { + return topicManager.deleteKafkaTopic(clusterId, topicName); + } + + @Override + public Uni getKafkaTopic( + String clusterId, String topicName, Boolean includeAuthorizedOperations + ) { + return topicManager.getKafkaTopic(clusterId, topicName, includeAuthorizedOperations); + } + + @Override + public Uni listKafkaTopics(String clusterId, Boolean includeAuthorizedOperations) { + return topicManager.listKafkaTopics(clusterId, includeAuthorizedOperations); + } + + @Override + public Uni updatePartitionCountKafkaTopic( + String clusterId, + String topicName, + UpdatePartitionCountRequestData updatePartitionCountRequestData + ) { + throw new UnsupportedOperationException("Not implemented yet"); + } +} diff --git a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClients.java b/src/main/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClients.java deleted file mode 100644 index c7c033f1..00000000 --- a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClients.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright [2024 - 2024] Confluent Inc. - */ - -package io.confluent.idesidecar.restapi.messageviewer; - -import io.confluent.idesidecar.restapi.connections.ConnectionState; -import io.confluent.idesidecar.restapi.events.Lifecycle; -import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.ObservesAsync; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Supplier; - -/** - * Utilities to obtain and cache Schema Registry clients. 
- */ -@ApplicationScoped -public class SchemaRegistryClients { - - private final Map> clientsByIdByConnections = - new ConcurrentHashMap<>(); - - /** - * Get a client for the Schema Registry with the given identifier. - * - * @param connectionId the ID of the connection - * @param schemaRegistryId the identifier of the Schema Registry - * @param factory the method that will create the client if there is not already one - * @return the Schema Registry client - */ - public SchemaRegistryClient getClient( - String connectionId, - String schemaRegistryId, - Supplier factory - ) { - return clientsForConnection(connectionId).computeIfAbsent( - schemaRegistryId, - k -> factory.get() - ); - } - - private Map clientsForConnection(String connectionId) { - return clientsByIdByConnections.computeIfAbsent( - connectionId, - k -> new ConcurrentHashMap<>() - ); - } - - int clientCount() { - return clientsByIdByConnections - .values() - .stream() - .map(Map::size) - .mapToInt(Integer::intValue) - .sum(); - } - - int clientCount(String connectionId) { - return clientsForConnection(connectionId).size(); - } - - void clearClients(String connectionId) { - var oldCache = clientsByIdByConnections.put(connectionId, new ConcurrentHashMap<>()); - if (oldCache != null) { - // clean up all clients in the old cache - oldCache.forEach((id, client) -> { - try { - client.close(); - } catch (Throwable t) { - // Ignore these as we don't care - } - }); - } - } - - /** - * Respond to the connection being disconnected by clearing and closing the - * Schema Registry clients that were cached for that connection. - * - * @param connection the connection that was disconnected - */ - void onConnectionDisconnected( - @ObservesAsync @Lifecycle.Disconnected ConnectionState connection - ) { - clearClients(connection.getId()); - } - - /** - * Respond to the connection being deleted by clearing and closing the - * Schema Registry clients that were cached for that connection. 
- * - * @param connection the connection that was deleted - */ - void onConnectionDeleted(@ObservesAsync @Lifecycle.Deleted ConnectionState connection) { - clearClients(connection.getId()); - } -} diff --git a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentCloudConsumeStrategy.java b/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentCloudConsumeStrategy.java index 0747b9e3..e1c65658 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentCloudConsumeStrategy.java +++ b/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentCloudConsumeStrategy.java @@ -6,11 +6,11 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import io.confluent.idesidecar.restapi.application.SidecarAccessTokenBean; +import io.confluent.idesidecar.restapi.cache.SchemaRegistryClients; import io.confluent.idesidecar.restapi.connections.CCloudConnectionState; import io.confluent.idesidecar.restapi.exceptions.ProcessorFailedException; import io.confluent.idesidecar.restapi.messageviewer.DecoderUtil; import io.confluent.idesidecar.restapi.messageviewer.MessageViewerContext; -import io.confluent.idesidecar.restapi.messageviewer.SchemaRegistryClients; import io.confluent.idesidecar.restapi.messageviewer.data.SimpleConsumeMultiPartitionResponse; import io.confluent.idesidecar.restapi.messageviewer.data.SimpleConsumeMultiPartitionResponse.PartitionConsumeData; import io.confluent.idesidecar.restapi.messageviewer.data.SimpleConsumeMultiPartitionResponse.PartitionConsumeRecord; diff --git a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentLocalConsumeStrategy.java b/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentLocalConsumeStrategy.java index 951e2b27..7e51a46d 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentLocalConsumeStrategy.java +++ 
b/src/main/java/io/confluent/idesidecar/restapi/messageviewer/strategy/ConfluentLocalConsumeStrategy.java @@ -1,8 +1,8 @@ package io.confluent.idesidecar.restapi.messageviewer.strategy; +import io.confluent.idesidecar.restapi.cache.SchemaRegistryClients; import io.confluent.idesidecar.restapi.exceptions.ProcessorFailedException; import io.confluent.idesidecar.restapi.messageviewer.MessageViewerContext; -import io.confluent.idesidecar.restapi.messageviewer.SchemaRegistryClients; import io.confluent.idesidecar.restapi.messageviewer.SimpleConsumer; import io.confluent.idesidecar.restapi.messageviewer.data.SimpleConsumeMultiPartitionRequest; import io.confluent.idesidecar.restapi.messageviewer.data.SimpleConsumeMultiPartitionResponse; diff --git a/src/main/java/io/confluent/idesidecar/restapi/models/graph/KafkaCluster.java b/src/main/java/io/confluent/idesidecar/restapi/models/graph/KafkaCluster.java index b6b55de2..d638b29d 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/models/graph/KafkaCluster.java +++ b/src/main/java/io/confluent/idesidecar/restapi/models/graph/KafkaCluster.java @@ -1,10 +1,11 @@ package io.confluent.idesidecar.restapi.models.graph; import io.quarkus.runtime.annotations.RegisterForReflection; +import io.smallrye.common.constraint.NotNull; import io.smallrye.graphql.api.DefaultNonNull; @RegisterForReflection @DefaultNonNull public interface KafkaCluster extends Cluster { - String bootstrapServers(); + @NotNull String bootstrapServers(); } diff --git a/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/processors/ClusterProxyProcessor.java b/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/processors/ClusterProxyProcessor.java index fc0c387a..6db8f744 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/processors/ClusterProxyProcessor.java +++ b/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/processors/ClusterProxyProcessor.java @@ -60,5 +60,4 @@ public Future 
process(ClusterProxyContext context) { return processedContext; }); } - } diff --git a/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategy.java b/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategy.java index f582c036..d99e0da6 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategy.java +++ b/src/main/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategy.java @@ -1,45 +1,73 @@ package io.confluent.idesidecar.restapi.proxy.clusters.strategy; +import static io.confluent.idesidecar.restapi.util.RequestHeadersConstants.CONNECTION_ID_HEADER; + +import io.confluent.idesidecar.restapi.application.SidecarAccessTokenBean; +import io.confluent.idesidecar.restapi.proxy.clusters.ClusterProxyContext; +import io.vertx.core.MultiMap; +import io.vertx.core.http.HttpHeaders; import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; import org.eclipse.microprofile.config.inject.ConfigProperty; /** - * Strategy for processing requests and responses for a local Kafka cluster. + * Strategy for processing requests and responses for a Confluent local Kafka cluster. */ @ApplicationScoped public class ConfluentLocalKafkaClusterStrategy extends ClusterStrategy { - final String confluentLocalKafkaRestHostname; + @ConfigProperty(name = "ide-sidecar.api.host") + String sidecarHost; - public ConfluentLocalKafkaClusterStrategy( - @ConfigProperty(name = "ide-sidecar.connections.confluent-local.default.kafkarest-hostname") - String confluentLocalKafkaRestHostname - ) { - this.confluentLocalKafkaRestHostname = confluentLocalKafkaRestHostname; - } + @Inject + SidecarAccessTokenBean accessToken; + /** + * We require the connection ID header to be passed to our implementation of the Kafka REST API + * at the /internal/kafka path. 
This is used to identify the connection that the request is + * associated with. We also pass the access token as a Bearer token in the Authorization header. + * @param context The context of the proxy request. + * @return The headers to be passed to our implementation of the Kafka REST API. + */ + @Override + public MultiMap constructProxyHeaders(ClusterProxyContext context) { + return HttpHeaders + .headers() + .add(CONNECTION_ID_HEADER, context.getConnectionId()) + .add(HttpHeaders.AUTHORIZATION, "Bearer %s".formatted(accessToken.getToken())); + } + /** + * Route the request back to ourselves at the /internal/kafka path. + * Context: We used to send this proxy request to the Kafka REST server running in the + * confluent-local container, but now we route the request to our own implementation of the + * Kafka REST API, served at the /internal/kafka path. This was done to get early feedback + * on our in-house implementation of the Kafka REST API. + * @param requestUri The URI of the incoming request. + * @param clusterUri The Kafka REST URI running alongside the Kafka cluster. + * (unused here) + * @return The URI of the Kafka REST API running in the sidecar. + */ @Override public String constructProxyUri(String requestUri, String clusterUri) { - // Remove the /kafka prefix from the request URI since this is how the REST API - // running in Confluent Local Kafka is configured. - return uriUtil.combine(clusterUri, requestUri.replaceFirst("^(/kafka|kafka)", "")); + return uriUtil.combine( + sidecarHost, requestUri.replaceFirst("^(/kafka|kafka)", "/internal/kafka") + ); } + /** + * In addition to replacing the cluster URLs with the sidecar host, we also need to replace + * the internal Kafka REST path /internal/kafka with the external facing /kafka path. + * @param proxyResponse The response body from the Kafka REST API. + * @param clusterUri The URI of the Kafka REST API running alongside the Kafka cluster. 
+ * (unused here) + * @param sidecarHost The host of the sidecar. + * @return The response body with the internal Kafka REST path replaced with the external path. + */ @Override public String processProxyResponse(String proxyResponse, String clusterUri, String sidecarHost) { - return super.processProxyResponse( - proxyResponse, - // We don't care for the clusterUri passed to this method since it - // points to the externally accessible URI of the Confluent Local Kafka REST API - // exposed from the running docker container. - // Instead, the response contains the docker container's internal hostname ("rest-proxy"), - // which is what we want to replace with the sidecar URI. - "http://%s".formatted(confluentLocalKafkaRestHostname), - // Since the REST API running in Confluent Local Kafka is configured to run on /v3, - // we add the /kafka to the sidecar URI before replacing the cluster URI - // with the sidecar URI. - uriUtil.combine(sidecarHost, "/kafka") - ); + return super + .processProxyResponse(proxyResponse, sidecarHost, sidecarHost) + .replaceAll("%s/internal/kafka".formatted(sidecarHost), "%s/kafka".formatted(sidecarHost)); } } diff --git a/src/main/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResource.java b/src/main/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResource.java index c4cc8954..d6243388 100644 --- a/src/main/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResource.java +++ b/src/main/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResource.java @@ -8,11 +8,11 @@ import io.quarkus.vertx.web.Route; import io.smallrye.common.annotation.Blocking; import io.vertx.core.Future; +import io.vertx.core.http.HttpHeaders; import io.vertx.ext.web.RoutingContext; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import jakarta.inject.Named; -import jakarta.ws.rs.core.HttpHeaders; import jakarta.ws.rs.core.MediaType; /** @@ -48,6 +48,15 @@ private void 
handleClusterProxy(RoutingContext routingContext, ClusterProxyConte routingContext.response().setStatusCode(context.getProxyResponseStatusCode()); routingContext.response().headers().addAll(context.getProxyResponseHeaders()); if (context.getProxyResponseBody() != null) { + // Set content-length header to the length of the response body + // so that the client knows when the response is complete. + // Set only if transfer-encoding is not set, as it takes precedence. + if (context.getProxyResponseHeaders().get(HttpHeaders.TRANSFER_ENCODING) == null) { + routingContext.response().putHeader( + HttpHeaders.CONTENT_LENGTH, + String.valueOf(context.getProxyResponseBody().getBytes().length) + ); + } routingContext.response().end(context.getProxyResponseBody()); } else { routingContext.response().end(); diff --git a/src/main/java/io/confluent/idesidecar/restapi/util/MutinyUtil.java b/src/main/java/io/confluent/idesidecar/restapi/util/MutinyUtil.java new file mode 100644 index 00000000..97c995ed --- /dev/null +++ b/src/main/java/io/confluent/idesidecar/restapi/util/MutinyUtil.java @@ -0,0 +1,21 @@ +package io.confluent.idesidecar.restapi.util; + +import io.smallrye.mutiny.Uni; +import java.util.concurrent.CompletionStage; + +/** + * Helper methods for working with the Mutiny library. 
+ */ +public final class MutinyUtil { + + private MutinyUtil() { + } + + public static Uni uniStage(CompletionStage stage) { + return Uni.createFrom().completionStage(stage); + } + + public static Uni uniItem(T item) { + return Uni.createFrom().item(item); + } +} diff --git a/src/main/resources/META-INF/native-image/com.github.ben-manes.caffeine/caffeine/reflect-config.json b/src/main/resources/META-INF/native-image/com.github.ben-manes.caffeine/caffeine/reflect-config.json new file mode 100644 index 00000000..10675be8 --- /dev/null +++ b/src/main/resources/META-INF/native-image/com.github.ben-manes.caffeine/caffeine/reflect-config.json @@ -0,0 +1,26 @@ +[ + { + "name": "com.github.benmanes.caffeine.cache.PSW", + "allDeclaredConstructors": true + }, + { + "name": "com.github.benmanes.caffeine.cache.PSA", + "allDeclaredConstructors": true + }, + { + "name": "com.github.benmanes.caffeine.cache.PSWMS", + "allDeclaredConstructors": true + }, + { + "name": "com.github.benmanes.caffeine.cache.SSLA", + "allDeclaredConstructors": true + }, + { + "name": "com.github.benmanes.caffeine.cache.SSLMSW", + "allDeclaredConstructors": true + }, + { + "name": "com.github.benmanes.caffeine.cache.SSMSW", + "allDeclaredConstructors": true + } +] \ No newline at end of file diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index cbf1962f..92ecb4f2 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -119,6 +119,11 @@ ide-sidecar: enabled: true # How often to check? 
interval-seconds: 60 + # Configuration set by the sidecar when instantiating the Kafka Admin client + # (We may choose to allow connection-specific overrides in the future) + admin-client-configs: + "default.api.timeout.ms": 3000 + "request.timeout.ms": 3000 quarkus: application: @@ -127,6 +132,11 @@ quarkus: enabled: false log: min-level: INFO + category: + "org.apache.kafka.clients": + level: ERROR + "org.apache.kafka.common": + level: ERROR http: host: 127.0.0.1 port: 26636 @@ -170,3 +180,10 @@ quarkus: # Disable file caching in Vert.X since the native executable includes the path of the tmp dir # of the machine where the native image was built. caching: false + +# Hide the /internal/kafka route from the OpenAPI spec +mp: + openapi: + scan: + exclude: + packages: io.confluent.idesidecar.restapi.kafkarest diff --git a/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClientsTest.java b/src/test/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClientsTest.java similarity index 96% rename from src/test/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClientsTest.java rename to src/test/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClientsTest.java index b9690152..61dd016f 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SchemaRegistryClientsTest.java +++ b/src/test/java/io/confluent/idesidecar/restapi/cache/SchemaRegistryClientsTest.java @@ -1,8 +1,4 @@ -/* - * Copyright [2024 - 2024] Confluent Inc. 
- */ - -package io.confluent.idesidecar.restapi.messageviewer; +package io.confluent.idesidecar.restapi.cache; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; diff --git a/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/ClusterV3ApiImplIT.java b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/ClusterV3ApiImplIT.java new file mode 100644 index 00000000..d3c86765 --- /dev/null +++ b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/ClusterV3ApiImplIT.java @@ -0,0 +1,103 @@ +package io.confluent.idesidecar.restapi.kafkarest.api; + +import static io.confluent.idesidecar.restapi.util.ResourceIOUtil.loadResource; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.equalTo; +import static io.confluent.idesidecar.restapi.testutil.QueryResourceUtil.queryGraphQLRaw; + +import io.confluent.idesidecar.restapi.testutil.NoAccessFilterProfile; +import io.confluent.idesidecar.restapi.util.ConfluentLocalKafkaWithRestProxyContainer; +import io.quarkus.test.junit.QuarkusIntegrationTest; +import io.quarkus.test.junit.TestProfile; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + + +@QuarkusIntegrationTest +@Tag("io.confluent.common.utils.IntegrationTest") +@TestProfile(NoAccessFilterProfile.class) +public class ClusterV3ApiImplIT extends KafkaRestTestBed { + @Test + void shouldListKafkaClusters() { + // Try to list Kafka clusters when none are available, we should get an empty list + given() + .header("X-connection-id", CONNECTION_ID) + .when() + .get("/internal/kafka/v3/clusters") + .then() + .statusCode(200) + .body("data.size()", equalTo(0)); + + // Issue GraphQL query to create a Kafka cluster + // The internal Kafka REST implementation _intentionally_ does not have + // the ability to discover and fetch metadata about Kafka clusters that it + // does not already know about. 
+ + // Issue a get local connections GraphQL query. We don't care about the response. + // By issuing the query, GraphQL will try and discover the + // Confluent local Kafka cluster by hitting its kafka-rest server running at + // http://localhost:8082, upon which the cluster details get cached in the ClusterCache. + // The internal Kafka REST implementation then looks in the ClusterCache to fetch + // metadata about Kafka clusters. + queryGraphQLRaw(loadResource("graph/real/local-connections-query.graphql")); + + // And now, we should be able to list the Kafka cluster + given() + .header("X-connection-id", CONNECTION_ID) + .when() + .get("/internal/kafka/v3/clusters") + .then() + .statusCode(200) + .body("data.size()", equalTo(1)) + .body("data[0].cluster_id", + equalTo(ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID)); + } + + @Test + void shouldGetKafkaCluster() { + given() + .header("X-connection-id", CONNECTION_ID) + .when() + .get("/internal/kafka/v3/clusters/{cluster_id}", + ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID) + .then() + .statusCode(200) + .body("cluster_id", equalTo(ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID)); + } + + @Test + void shouldReturn404WhenClusterNotFound() { + given() + .header("X-connection-id", CONNECTION_ID) + .when() + .get("/internal/kafka/v3/clusters/{cluster_id}", + "non-existent-cluster") + .then() + .statusCode(404) + .body("error_code", equalTo(404)) + .body("message", equalTo("Kafka cluster 'non-existent-cluster' not found.")); + } + + @Test + void shouldRaiseErrorWhenConnectionIdIsMissing() { + given() + .when() + .get("/internal/kafka/v3/clusters") + .then() + .statusCode(400) + .body("error_code", equalTo(400)) + .body("message", equalTo("Missing required header: x-connection-id")); + } + + @Test + void shouldRaiseErrorWhenConnectionNotFound() { + given() + .header("X-connection-id", "non-existent-connection") + .when() + .get("/internal/kafka/v3/clusters") + .then() + .statusCode(404) + 
.body("error_code", equalTo(404)) + .body("message", equalTo("Connection not found: non-existent-connection")); + } +} \ No newline at end of file diff --git a/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/KafkaRestTestBed.java b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/KafkaRestTestBed.java new file mode 100644 index 00000000..fc74109c --- /dev/null +++ b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/KafkaRestTestBed.java @@ -0,0 +1,75 @@ +package io.confluent.idesidecar.restapi.kafkarest.api; + +import static io.restassured.RestAssured.given; + +import io.confluent.idesidecar.restapi.models.ConnectionSpec; +import io.confluent.idesidecar.restapi.models.ConnectionSpec.ConnectionType; +import io.confluent.idesidecar.restapi.util.ConfluentLocalKafkaWithRestProxyContainer; +import io.confluent.idesidecar.restapi.util.KafkaTestBed; +import io.restassured.http.ContentType; +import java.util.Properties; +import org.eclipse.microprofile.config.ConfigProvider; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.testcontainers.containers.wait.strategy.Wait; + +public class KafkaRestTestBed extends KafkaTestBed { + private static ConfluentLocalKafkaWithRestProxyContainer confluentLocal; + protected static final String CONNECTION_ID = "test-connection"; + + private static final Integer testPort = ConfigProvider.getConfig() + .getValue("quarkus.http.test-port", Integer.class); + + @BeforeAll + static void setup() { + confluentLocal = new ConfluentLocalKafkaWithRestProxyContainer() + .waitingFor(Wait.forLogMessage( + ".*Server started, listening for requests.*\\n", 1)) + // Kafka REST server port + .waitingFor(Wait.forListeningPorts( + ConfluentLocalKafkaWithRestProxyContainer.REST_PROXY_PORT + )); + confluentLocal.start(); + + // Create a connection + KafkaRestTestBed.createConnection(); + } + + private static void createConnection() { + given() + .contentType(ContentType.JSON) + .body(new 
ConnectionSpec( + CONNECTION_ID, + CONNECTION_ID, + // Connection type does not matter for this test... yet + ConnectionType.LOCAL, + null, + null + )) + .when().post("http://localhost:%s/gateway/v1/connections".formatted( + testPort)) + .then() + .statusCode(200); + } + + private static void deleteConnection() { + given() + .when().delete("http://localhost:%s/gateway/v1/connections/%s".formatted( + testPort, CONNECTION_ID)) + .then() + .statusCode(204); + } + + @AfterAll + static void teardown() { + confluentLocal.stop(); + deleteConnection(); + } + + @Override + protected Properties getKafkaProperties() { + Properties properties = new Properties(); + properties.put("bootstrap.servers", confluentLocal.getKafkaBootstrapServers()); + return properties; + } +} diff --git a/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/TopicV3ApiImplIT.java b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/TopicV3ApiImplIT.java new file mode 100644 index 00000000..d7575893 --- /dev/null +++ b/src/test/java/io/confluent/idesidecar/restapi/kafkarest/api/TopicV3ApiImplIT.java @@ -0,0 +1,141 @@ +package io.confluent.idesidecar.restapi.kafkarest.api; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.equalTo; + +import io.confluent.idesidecar.restapi.testutil.NoAccessFilterProfile; +import io.confluent.idesidecar.restapi.util.ConfluentLocalKafkaWithRestProxyContainer; +import io.quarkus.test.junit.QuarkusIntegrationTest; +import io.quarkus.test.junit.TestProfile; +import io.restassured.specification.RequestSpecification; +import java.util.Map; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@QuarkusIntegrationTest +@Tag("io.confluent.common.utils.IntegrationTest") +@TestProfile(NoAccessFilterProfile.class) +class TopicV3ApiImplIT extends KafkaRestTestBed { + + private static RequestSpecification spec() { + var clusterId = ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID; + return given() + 
.header("X-connection-id", CONNECTION_ID) + .when() + .pathParams(Map.of("cluster_id", clusterId)); + } + + @Test + void shouldCreateKafkaTopic() throws Exception { + createTopic("test-topic-1"); + + // Get topic should contain the topic name + spec() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics/test-topic-1") + .then() + .statusCode(200) + .body("topic_name", equalTo("test-topic-1")); + + // List topics should contain the topic name + spec() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics") + .then() + .statusCode(200) + // Could be at any index + .body("data.find { it.topic_name == 'test-topic-1' }.topic_name", equalTo("test-topic-1")); + + deleteTopic("test-topic-1"); + } + + @Test + void shouldDeleteKafkaTopic() throws Exception { + createTopic("test-topic-delete-me"); + + // Delete topic should return 204 + spec() + .delete("/internal/kafka/v3/clusters/{cluster_id}/topics/test-topic-delete-me") + .then() + .statusCode(204); + + // List topics should not contain the topic name + spec() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics") + .then() + .statusCode(200) + .body("data.find { it.topic_name == 'test-topic-delete-me' }", equalTo(null)); + } + + @Test + void shouldRaise404WhenGettingNonExistentTopic() { + spec() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics/non-existent-topic") + .then() + .statusCode(404) + .body("error_code", equalTo(404)) + .body("message", equalTo("This server does not host this topic-partition.")); + } + + @Test + void shouldRaise404WhenDeletingNonExistentTopic() { + spec() + .delete("/internal/kafka/v3/clusters/{cluster_id}/topics/non-existent-topic") + .then() + .statusCode(404) + .body("error_code", equalTo(404)) + .body("message", equalTo("This server does not host this topic-partition.")); + } + + @Test + void shouldRaise409WhenCreatingExistingTopic() throws Exception { + createTopic("test-topic-2"); + + spec() + .body("{\"topic_name\":\"test-topic-2\"}") + .header("Content-Type", 
"application/json") + .post("/internal/kafka/v3/clusters/{cluster_id}/topics") + .then() + .statusCode(409) + .body("error_code", equalTo(409)) + .body("message", equalTo("Topic 'test-topic-2' already exists.")); + + deleteTopic("test-topic-2"); + } + + @Test + void shouldRaise404OnNonExistentCluster() { + given() + .header("X-connection-id", CONNECTION_ID) + .when() + .get("/internal/kafka/v3/clusters/non-existent-cluster/topics") + .then() + .statusCode(404) + .body("error_code", equalTo(404)) + .body("message", equalTo("Kafka cluster 'non-existent-cluster' not found.")); + } + + @Test + void shouldRaise404OnNonExistentConnection() { + given() + .header("X-connection-id", "non-existent-connection") + .when() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics", + Map.of("cluster_id", ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID)) + .then() + .statusCode(404) + .body("error_code", equalTo(404)) + .body("message", equalTo("Connection not found: non-existent-connection")); + } + + @Test + void shouldRaise400OnAbsentConnectionIdHeader() { + given() + // No connection ID header + .when() + .get("/internal/kafka/v3/clusters/{cluster_id}/topics", + Map.of("cluster_id", ConfluentLocalKafkaWithRestProxyContainer.CLUSTER_ID)) + .then() + .statusCode(400) + .body("error_code", equalTo(400)) + .body("message", equalTo("Missing required header: x-connection-id")); + } +} \ No newline at end of file diff --git a/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SimpleConsumerIT.java b/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SimpleConsumerIT.java index 01a1c40e..147cee9b 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SimpleConsumerIT.java +++ b/src/test/java/io/confluent/idesidecar/restapi/messageviewer/SimpleConsumerIT.java @@ -71,7 +71,7 @@ public static void tearDown() { testBed.stop(); } - void recreateTopic(String topic) throws ExecutionException, InterruptedException, TimeoutException { + void 
recreateTopic(String topic) throws Exception { if (testBed.topicExists(topic)) { testBed.deleteTopic(topic); testBed.waitForTopicCreation(topic, Duration.ofSeconds(30)); @@ -81,7 +81,7 @@ void recreateTopic(String topic) throws ExecutionException, InterruptedException } @Test - void testAvroProduceAndConsume() throws ExecutionException, InterruptedException, TimeoutException { + void testAvroProduceAndConsume() throws Exception { String topic = "myavromessage1"; recreateTopic(topic); diff --git a/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ClusterStrategyTest.java b/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ClusterStrategyTest.java index b682f833..e33a71a9 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ClusterStrategyTest.java +++ b/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ClusterStrategyTest.java @@ -3,14 +3,20 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import io.quarkus.test.junit.QuarkusTest; import java.util.stream.Stream; +import org.eclipse.microprofile.config.ConfigProvider; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +@QuarkusTest class ClusterStrategyTest { + static final int TEST_PORT = ConfigProvider.getConfig() + .getValue("quarkus.http.test-port", Integer.class); + static class ClusterStrategyImpl extends ClusterStrategy { } @@ -27,7 +33,7 @@ void testProcessProxyResponse( var actualResponse = baseClusterStrategy.processProxyResponse( proxyResponse, clusterUri, - "http://localhost:26637" + "http://localhost:%s".formatted(TEST_PORT) ); assertEquals(expectedResponse, actualResponse); } @@ -48,10 +54,10 @@ private static Stream testProcessProxyResponse() { """ { "partitions": { - "related": 
"http://localhost:26637/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" + "related": "http://localhost:%s/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" } } - """ + """.formatted(TEST_PORT) ), // When the returned URLs contain a port Arguments.of( @@ -67,10 +73,10 @@ private static Stream testProcessProxyResponse() { """ { "partitions": { - "related": "http://localhost:26637/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" + "related": "http://localhost:%s/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" } } - """ + """.formatted(TEST_PORT) ), // When the stored cluster URI has a port, but the returned URLs do not Arguments.of( @@ -86,10 +92,10 @@ private static Stream testProcessProxyResponse() { """ { "partitions": { - "related": "http://localhost:26637/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" + "related": "http://localhost:%s/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" } } - """ + """.formatted(TEST_PORT) ), // When both the stored cluster URI and the returned URLs have ports Arguments.of( @@ -106,10 +112,10 @@ private static Stream testProcessProxyResponse() { """ { "partitions": { - "related": "http://localhost:26637/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" + "related": "http://localhost:%s/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" } } - """ + """.formatted(TEST_PORT) )); } diff --git a/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategyTest.java b/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategyTest.java index b2faae56..1e11a278 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategyTest.java +++ b/src/test/java/io/confluent/idesidecar/restapi/proxy/clusters/strategy/ConfluentLocalKafkaClusterStrategyTest.java @@ -3,9 +3,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import 
io.quarkus.test.junit.QuarkusTest; +import jakarta.inject.Inject; import java.util.stream.Stream; import org.eclipse.microprofile.config.ConfigProvider; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -13,16 +13,11 @@ @QuarkusTest class ConfluentLocalKafkaClusterStrategyTest { - private static final String CONFLUENT_LOCAL_KAFKAREST_HOSTNAME = ConfigProvider - .getConfig() - .getValue("ide-sidecar.connections.confluent-local.default.kafkarest-hostname", String.class); + private static final int TEST_PORT = ConfigProvider.getConfig() + .getValue("quarkus.http.test-port", Integer.class); - ClusterStrategy strategy; - - @BeforeEach - void setUp() { - strategy = new ConfluentLocalKafkaClusterStrategy(CONFLUENT_LOCAL_KAFKAREST_HOSTNAME); - } + @Inject + ConfluentLocalKafkaClusterStrategy strategy; @ParameterizedTest @MethodSource @@ -40,26 +35,26 @@ private static Stream testConstructProxyUri() { Arguments.of( "/kafka/v3/clusters/my-cluster/topics", - "http://localhost:8082", - "http://localhost:8082/v3/clusters/my-cluster/topics" + "http://localhost:%s".formatted(TEST_PORT), + "http://localhost:%s/internal/kafka/v3/clusters/my-cluster/topics".formatted(TEST_PORT) ), Arguments.of( "kafka/v3/clusters/my-cluster/topics", - "http://localhost:8082", - "http://localhost:8082/v3/clusters/my-cluster/topics" + "http://localhost:%s".formatted(TEST_PORT), + "http://localhost:%s/internal/kafka/v3/clusters/my-cluster/topics".formatted(TEST_PORT) ), Arguments.of( "kafka/v3/clusters/my-cluster/topics", - "http://localhost:8082/", - "http://localhost:8082/v3/clusters/my-cluster/topics" + "http://localhost:%s/".formatted(TEST_PORT), + "http://localhost:%s/internal/kafka/v3/clusters/my-cluster/topics".formatted(TEST_PORT) ), Arguments.of( "/kafka/v3/clusters/my-cluster/topics", - "http://localhost:8082/", - 
"http://localhost:8082/v3/clusters/my-cluster/topics" + "http://localhost:%s/".formatted(TEST_PORT), + "http://localhost:%s/internal/kafka/v3/clusters/my-cluster/topics".formatted(TEST_PORT) ) ); } @@ -69,11 +64,11 @@ void testProcessProxyResponse() { String proxyResponse = """ { "partitions": { - "related": "http://rest-proxy:8082/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" + "related": "http://localhost:%s/internal/kafka/v3/clusters/lkc-95w6wy/topics/my_topic/partitions" } } - """; - String clusterUri = "http://rest-proxy:8082"; + """.formatted(TEST_PORT); + String clusterUri = "http://localhost:%s".formatted(TEST_PORT); String expectedResponse = """ { "partitions": { @@ -84,7 +79,7 @@ void testProcessProxyResponse() { var actualResponse = strategy.processProxyResponse( proxyResponse, clusterUri, - "http://localhost:26637" + "http://localhost:%s".formatted(TEST_PORT) ); assertEquals(expectedResponse, actualResponse); } diff --git a/src/test/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResourceTest.java b/src/test/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResourceTest.java index 37129eea..36fcc367 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResourceTest.java +++ b/src/test/java/io/confluent/idesidecar/restapi/resources/ClusterRestProxyResourceTest.java @@ -185,100 +185,6 @@ void testKafkaRestProxyThrows400IfClusterIdInPathDoesNotMatchHeader() { containsString("Cluster id in the header does not match the one in the path.")); } - @ParameterizedTest - @MethodSource("validClusterRequests") - void testBadClusterInfoReturnsError( - ConnectionType connectionType, - ClusterType clusterType, - String path - ) { - - // Given an authenticated connection - ccloudTestUtil.createAuthedConnection(CONNECTION_ID, connectionType); - - // And given a cluster in the cache - expectClusterInCache( - clusterCache, - CONNECTION_ID, - CLUSTER_ID, - "http://invalid-host:%d".formatted(wireMockPort), - 
clusterType - ); - - // Then requests to the cluster will return error - given() - .when() - .headers(CLUSTER_REQUEST_HEADERS) - .get(path) - .then() - .statusCode(500) - .contentType(MediaType.APPLICATION_JSON) - .body("title", is("Something went wrong while proxying request")) - .body("errors[0].detail", containsString("invalid-host")); - } - - - private static Stream validClusterRequests() { - return Stream.of( - Arguments.of( - // Type of connection - ConnectionType.CCLOUD, - // Type of cluster - ClusterType.KAFKA, - // The sidecar proxy path to hit - "/kafka/v3/clusters/%s/topics".formatted(CLUSTER_ID), - // The remote cluster endpoint to wiremock - "/kafka/v3/clusters/%s/topics".formatted(CLUSTER_ID)), - Arguments.of( - ConnectionType.LOCAL, ClusterType.KAFKA, - "/kafka/v3/clusters/%s/topics".formatted(CLUSTER_ID), - "/v3/clusters/%s/topics".formatted(CLUSTER_ID)), - Arguments.of(ConnectionType.CCLOUD, ClusterType.SCHEMA_REGISTRY, - "/subjects/fake-subject/versions/fake-version/schema", - "/subjects/fake-subject/versions/fake-version/schema"), - Arguments.of(ConnectionType.CCLOUD, ClusterType.SCHEMA_REGISTRY, - "/schemas/id/fake-schema-id/subjects", - "/schemas/id/fake-schema-id/subjects") - ); - } - - @ParameterizedTest - @MethodSource("validClusterRequests") - void testBasicProxyingClusterRequests( - ConnectionType connectionType, - ClusterType clusterType, - String path, - String wireMockPath - ) { - wireMock.register( - WireMock.get(wireMockPath) - .willReturn( - WireMock.aResponse() - .withStatus(204))); - - // Given an authenticated connection - ccloudTestUtil.createAuthedConnection(CONNECTION_ID, connectionType); - - // And given a cluster in the cache - expectClusterInCache( - clusterCache, - CONNECTION_ID, - CLUSTER_ID, - "http://localhost:%d".formatted(wireMockPort), - clusterType - ); - - // Then requests to the cluster will be accepted - given() - .when() - .headers(CLUSTER_REQUEST_HEADERS) - .get(path) - .then() - .statusCode(204) - 
.body(emptyString()); - } - - @Test void testKafkaRestProxyAgainstCCloud() throws Throwable { // Given an authenticated CCloud connection @@ -443,66 +349,6 @@ void testUnauthedKafkaRestProxyAgainstCCloud() { .body("title", containsString("Unauthorized")); } - @Test - void testKafkaRestAgainstConfluentLocal() throws Throwable { - // Given a connection - ccloudTestUtil.createAuthedConnection(CONNECTION_ID, ConnectionType.LOCAL); - - // And given a kafka cluster in the cache - expectClusterInCache( - clusterCache, - CONNECTION_ID, - CLUSTER_ID, - "http://localhost:%d".formatted(wireMockPort), - ClusterType.KAFKA - ); - - // Given we have a fake Confluent Local Kafka REST server endpoint for list topics - wireMock.register( - // Notice how the mocked endpoint is /v3/clusters/%s/topics, this is what - // the sidecar tries to hit in case of Confluent Local Kafka REST - WireMock.get("/v3/clusters/%s/topics".formatted(CLUSTER_ID)) - .willReturn( - WireMock.aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/json") - .withHeader("x-local-specific-header", "fake-value") - .withBody( - new String(Objects.requireNonNull( - Thread - .currentThread() - .getContextClassLoader() - .getResourceAsStream( - "kafka-rest-proxy-mock-responses/" - + "list-topics-delegate-local-response.json") - ).readAllBytes()))).atPriority(100)); - - // When we hit the Sidecar Kafka proxy endpoint with the - // right connection ID and cluster ID - var actualResponse = given() - .when() - .headers(CLUSTER_REQUEST_HEADERS) - .get("/kafka/v3/clusters/%s/topics".formatted(CLUSTER_ID)) - .then(); - - // Then we should get a 200 response - actualResponse.statusCode(200); - // The response should have the correct headers - actualResponse.header("Content-Type", "application/json"); - actualResponse.header("x-local-specific-header", "fake-value"); - - var actualResponseBody = actualResponse.extract().asString(); - var expectedResponseBody = new String(Objects.requireNonNull( - Thread - 
.currentThread() - .getContextClassLoader() - .getResourceAsStream( - "kafka-rest-proxy-mock-responses/list-topics-sidecar-proxy-response.json") - ).readAllBytes()); - // Then the response body should be the same as the expected response body - assertEquals(expectedResponseBody, actualResponseBody); - } - private static Stream invalidClusterRequests() { return Stream.of( // Local Schema Registry is not supported diff --git a/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalKafkaWithRestProxyContainer.java b/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalKafkaWithRestProxyContainer.java index ad7e68aa..858806e0 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalKafkaWithRestProxyContainer.java +++ b/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalKafkaWithRestProxyContainer.java @@ -67,11 +67,11 @@ public class ConfluentLocalKafkaWithRestProxyContainer extends GenericContainer implements AutoCloseable { private static final int KAFKA_PORT = 9092; - private static final int REST_PROXY_PORT = 8082; private static final String DEFAULT_IMAGE = "confluentinc/confluent-local:7.6.0"; private static final String CONTAINER_NAME = "confluent-local-broker-1"; private static final String REST_PROXY_HOST_NAME = "rest-proxy"; - private static final String CLUSTER_ID = "oh-sxaDRTcyAr6pFRbXyzA"; + public static final int REST_PROXY_PORT = 8082; + public static final String CLUSTER_ID = "oh-sxaDRTcyAr6pFRbXyzA"; public ConfluentLocalKafkaWithRestProxyContainer() { this(DEFAULT_IMAGE); diff --git a/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalTestBed.java b/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalTestBed.java index dca21081..978756eb 100644 --- a/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalTestBed.java +++ b/src/test/java/io/confluent/idesidecar/restapi/util/ConfluentLocalTestBed.java @@ -33,7 +33,7 @@ import 
java.util.concurrent.TimeoutException; import org.testcontainers.shaded.org.awaitility.Awaitility; -public class ConfluentLocalTestBed implements AutoCloseable { +public class ConfluentLocalTestBed extends KafkaTestBed implements AutoCloseable { private static final String KAFKA_INTERNAL_LISTENER = "PLAINTEXT://confluent-local-broker-1:29092"; private final Network network; private final ConfluentLocalKafkaWithRestProxyContainer confluent; @@ -98,56 +98,7 @@ private boolean isSchemaRegistryReady() { } } - public void createTopic(String topicName, int partitions, short replicationFactor) throws InterruptedException, ExecutionException, TimeoutException { - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor); - adminClient.createTopics(Collections.singleton(newTopic)).all().get(30, TimeUnit.SECONDS); - } - } - - public void deleteTopic(String topicName) throws InterruptedException, ExecutionException, TimeoutException { - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - adminClient.deleteTopics(Collections.singleton(topicName)).all().get(30, TimeUnit.SECONDS); - } - } - - public void waitForTopicCreation(String topicName, Duration timeout) throws InterruptedException, ExecutionException, TimeoutException { - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - long pollIntervalMillis = Math.min(timeout.toMillis() / 10, 10000); // Poll every 10 second or less, based on timeout - - Awaitility.await() - .atMost(timeout.toMillis(), TimeUnit.MILLISECONDS) - .pollInterval(pollIntervalMillis, TimeUnit.MILLISECONDS) - .until(() -> adminClient.listTopics().names().get().contains(topicName)); - } - } - - public boolean topicExists(String topicName) throws InterruptedException, ExecutionException, TimeoutException { - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - return 
adminClient.listTopics().names().get(30, TimeUnit.SECONDS).contains(topicName); - } - } - - public void waitForTopicDeletion(String topicName, Duration timeout) throws InterruptedException, ExecutionException, TimeoutException { - long startTime = System.currentTimeMillis(); - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - while (System.currentTimeMillis() - startTime < timeout.toMillis()) { - if (!adminClient.listTopics().names().get().contains(topicName)) { - return; - } - Thread.sleep(1000); - } - throw new TimeoutException("Timed out waiting for topic deletion: " + topicName); - } - } - - public Set listTopics() throws InterruptedException, ExecutionException, TimeoutException { - try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { - ListTopicsResult topics = adminClient.listTopics(); - return topics.names().get(30, TimeUnit.SECONDS); - } - } - + @Override public Properties getKafkaProperties() { Properties props = new Properties(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, confluent.getKafkaBootstrapServers()); diff --git a/src/test/java/io/confluent/idesidecar/restapi/util/KafkaTestBed.java b/src/test/java/io/confluent/idesidecar/restapi/util/KafkaTestBed.java new file mode 100644 index 00000000..7081235b --- /dev/null +++ b/src/test/java/io/confluent/idesidecar/restapi/util/KafkaTestBed.java @@ -0,0 +1,73 @@ +package io.confluent.idesidecar.restapi.util; + +import java.time.Duration; +import java.util.Collections; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.NewTopic; +import org.testcontainers.shaded.org.awaitility.Awaitility; + +public abstract class KafkaTestBed { + + public void createTopic(String topicName, int partitions, short replicationFactor) + 
throws Exception { + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor); + adminClient.createTopics(Collections.singleton(newTopic)).all().get(30, TimeUnit.SECONDS); + } + } + + public void createTopic(String topicName) throws Exception { + createTopic(topicName, 1, (short) 1); + } + + public void deleteTopic(String topicName) throws Exception { + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + adminClient.deleteTopics(Collections.singleton(topicName)).all().get(30, TimeUnit.SECONDS); + } + } + + public void waitForTopicCreation(String topicName, Duration timeout) { + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + long pollIntervalMillis = Math.min(timeout.toMillis() / 10, + 10000); // Poll every 10 seconds or less, based on timeout + + Awaitility.await() + .atMost(timeout.toMillis(), TimeUnit.MILLISECONDS) + .pollInterval(pollIntervalMillis, TimeUnit.MILLISECONDS) + .until(() -> adminClient.listTopics().names().get().contains(topicName)); + } + } + + public boolean topicExists(String topicName) throws Exception{ + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + return adminClient.listTopics().names().get(30, TimeUnit.SECONDS).contains(topicName); + } + } + + public void waitForTopicDeletion(String topicName, Duration timeout) throws Exception{ + long startTime = System.currentTimeMillis(); + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + while (System.currentTimeMillis() - startTime < timeout.toMillis()) { + if (!adminClient.listTopics().names().get().contains(topicName)) { + return; + } + Thread.sleep(1000); + } + throw new TimeoutException("Timed out waiting for topic deletion: " + topicName); + } + } + + public Set listTopics() throws Exception { + try (AdminClient adminClient = AdminClient.create(getKafkaProperties())) { + ListTopicsResult topics
= adminClient.listTopics(); + return topics.names().get(30, TimeUnit.SECONDS); + } + } + + abstract protected Properties getKafkaProperties(); +}