From cf5f6e8ff6c0e002783e438054bd557eed5a74c2 Mon Sep 17 00:00:00 2001 From: Alexey Zorkaltsev Date: Tue, 25 Jul 2023 01:18:33 +0300 Subject: [PATCH 01/12] chore(build): fix build under Windows --- package-lock.json | 28 ++++++++++++++++++++++++++++ package.json | 5 +++-- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/package-lock.json b/package-lock.json index cb8e31e9..02c8df4f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -34,6 +34,7 @@ "@types/node": "^16.11.3", "@typescript-eslint/eslint-plugin": "^5.7.0", "@typescript-eslint/parser": "^5.7.0", + "cross-env": "^7.0.3", "eslint": "^8.4.1", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-typescript": "^16.1.0", @@ -3630,6 +3631,24 @@ "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", "dev": true }, + "node_modules/cross-env": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-7.0.3.tgz", + "integrity": "sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.1" + }, + "bin": { + "cross-env": "src/bin/cross-env.js", + "cross-env-shell": "src/bin/cross-env-shell.js" + }, + "engines": { + "node": ">=10.14", + "npm": ">=6", + "yarn": ">=1" + } + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ -16514,6 +16533,15 @@ "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", "dev": true }, + "cross-env": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-7.0.3.tgz", + "integrity": "sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.1" + } + }, "cross-spawn": { "version": "7.0.3", "resolved": 
"https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", diff --git a/package.json b/package.json index 508bfea2..58ca535a 100644 --- a/package.json +++ b/package.json @@ -23,6 +23,7 @@ "homepage": "https://github.com/yandex-cloud/nodejs-sdk#readme", "dependencies": { "@grpc/grpc-js": "^1.6.12", + "abort-controller-x": "^0.4.1", "axios": "^0.24.0", "jsonwebtoken": "^9.0.0", "lodash": "^4.17.21", @@ -31,7 +32,6 @@ "luxon": "^2.2.0", "nice-grpc": "^1.0.6", "nice-grpc-client-middleware-deadline": "^1.0.6", - "abort-controller-x": "^0.4.1", "node-abort-controller": "^3.1.1", "protobufjs": "^7.2.4", "utility-types": "^3.10.0" @@ -47,6 +47,7 @@ "@types/node": "^16.11.3", "@typescript-eslint/eslint-plugin": "^5.7.0", "@typescript-eslint/parser": "^5.7.0", + "cross-env": "^7.0.3", "eslint": "^8.4.1", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-typescript": "^16.1.0", @@ -67,7 +68,7 @@ "scripts": { "test": "jest -c config/jest.ts --passWithNoTests", "lint": "eslint src config", - "build": "NODE_OPTIONS=\"--max-old-space-size=4096\" tsc -p .", + "build": "cross-env NODE_OPTIONS=\"--max-old-space-size=4096\" tsc -p .", "generate-code": "ts-node scripts/generate-code.ts", "prepare": "husky install", "prepublishOnly": "npm run build" From e952cc80f7ff82efb039ec4413a38c3dbd394973 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 7 Aug 2023 21:49:36 +0200 Subject: [PATCH 02/12] chore: fix endpoints. 
Closes #136 --- src/service-endpoints.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 918ba622..9b16e5e3 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -2,7 +2,7 @@ import { ServiceClientConstructor, ServiceDefinition, } from '@grpc/grpc-js'; -import { GeneratedServiceClientCtor } from './types'; +import {GeneratedServiceClientCtor} from './types'; interface ServiceEndpoint { serviceIds: string[]; @@ -204,21 +204,21 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ }, { serviceIds: [ + 'yandex.cloud.iot.devices.v1.DeviceService', 'yandex.cloud.iot.devices.v1.RegistryService', ], endpoint: 'iot-devices.api.cloud.yandex.net:443', }, { serviceIds: [ - 'yandex.cloud.iot.devices.v1.RegistryDataService', - 'yandex.cloud.iot.devices.v1.DeviceService', + 'yandex.cloud.iot.broker.v1.BrokerDataService', 'yandex.cloud.iot.devices.v1.DeviceDataService', + 'yandex.cloud.iot.devices.v1.RegistryDataService', ], endpoint: 'iot-data.api.cloud.yandex.net:443', }, { serviceIds: [ - 'yandex.cloud.iot.broker.v1.BrokerDataService', 'yandex.cloud.iot.broker.v1.BrokerService', ], endpoint: 'iot-broker.api.cloud.yandex.net:443', From a262a173f6156afa794b292354100c6e64d116d4 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 7 Aug 2023 21:59:34 +0200 Subject: [PATCH 03/12] lint: fix spaces --- src/service-endpoints.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 9b16e5e3..9d426654 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -2,7 +2,7 @@ import { ServiceClientConstructor, ServiceDefinition, } from '@grpc/grpc-js'; -import {GeneratedServiceClientCtor} from './types'; +import { GeneratedServiceClientCtor } from './types'; interface ServiceEndpoint { serviceIds: string[]; From f446a643db076852aadefb7e3d4cda4f1761764e Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: 
Wed, 9 Aug 2023 10:15:55 +0300 Subject: [PATCH 04/12] fix: fake commit for triggering release --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 58ca535a..0d34b8c1 100644 --- a/package.json +++ b/package.json @@ -4,8 +4,8 @@ "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", - "cloud", - "sdk" + "sdk", + "cloud" ], "repository": { "type": "git", From 43e418416bb250f053088ed82eb2f78692228d71 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Wed, 9 Aug 2023 07:17:37 +0000 Subject: [PATCH 05/12] chore(release): 2.4.9 [skip ci] ## [2.4.9](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.4.8...v2.4.9) (2023-08-09) ### Bug Fixes * fake commit for triggering release ([f446a64](https://github.com/yandex-cloud/nodejs-sdk/commit/f446a643db076852aadefb7e3d4cda4f1761764e)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 02c8df4f..921b4027 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.8", + "version": "2.4.9", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.8", + "version": "2.4.9", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 0d34b8c1..b9261fc7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.8", + "version": "2.4.9", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 45b3aba15623c30037afafd761946faae51cad00 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 15:39:37 +0300 Subject: [PATCH 06/12] feat: update cloudapi --- cloudapi | 2 +- scripts/services.ts | 36 + src/generated/google/rpc/code.ts | 294 + src/generated/yandex/cloud/ai/index.ts | 2 + 
.../yandex/cloud/ai/llm/v1alpha/llm.ts | 448 + .../cloud/ai/llm/v1alpha/llm_service.ts | 1178 +++ src/generated/yandex/cloud/ai/ocr/v1/ocr.ts | 774 ++ .../yandex/cloud/ai/ocr/v1/ocr_service.ts | 534 ++ src/generated/yandex/cloud/ai/stt/v3/stt.ts | 2906 +++++- .../yandex/cloud/ai/stt/v3/stt_service.ts | 179 + src/generated/yandex/cloud/ai/tts/v3/tts.ts | 14 + .../cloud/ai/vision/v1/vision_service.ts | 8 +- .../yandex/cloud/apploadbalancer/index.ts | 1 + .../apploadbalancer/v1/http_router_service.ts | 51 +- .../cloud/apploadbalancer/v1/load_balancer.ts | 172 + .../v1/load_balancer_service.ts | 112 + .../cloud/apploadbalancer/v1/logging.ts | 392 + .../cloud/apploadbalancer/v1/virtual_host.ts | 451 +- .../v1/virtual_host_service.ts | 72 +- src/generated/yandex/cloud/backup/index.ts | 7 + .../yandex/cloud/backup/v1/backup.ts | 1773 ++++ .../yandex/cloud/backup/v1/backup_service.ts | 2145 +++++ .../yandex/cloud/backup/v1/policy.ts | 2913 ++++++ .../yandex/cloud/backup/v1/policy_service.ts | 1802 ++++ .../cloud/backup/v1/provider_service.ts | 501 ++ .../yandex/cloud/backup/v1/resource.ts | 827 ++ .../cloud/backup/v1/resource_service.ts | 1563 ++++ .../yandex/cloud/billing/v1/sku_service.ts | 4 +- src/generated/yandex/cloud/cdn/v1/raw_logs.ts | 9 +- src/generated/yandex/cloud/cdn/v1/resource.ts | 121 + .../yandex/cloud/cdn/v1/resource_service.ts | 266 + .../certificatemanager/v1/certificate.ts | 18 +- src/generated/yandex/cloud/compute/index.ts | 2 + src/generated/yandex/cloud/compute/v1/disk.ts | 20 + .../cloud/compute/v1/disk_placement_group.ts | 132 +- .../v1/disk_placement_group_service.ts | 60 +- .../yandex/cloud/compute/v1/disk_service.ts | 299 +- .../cloud/compute/v1/filesystem_service.ts | 32 +- .../yandex/cloud/compute/v1/gpu_cluster.ts | 455 + .../cloud/compute/v1/gpu_cluster_service.ts | 2097 +++++ .../cloud/compute/v1/host_group_service.ts | 33 +- .../yandex/cloud/compute/v1/image_service.ts | 32 +- .../yandex/cloud/compute/v1/instance.ts | 144 + 
.../cloud/compute/v1/instance_service.ts | 424 +- .../v1/instancegroup/instance_group.ts | 280 +- .../instancegroup/instance_group_service.ts | 429 +- .../cloud/compute/v1/placement_group.ts | 125 + .../compute/v1/placement_group_service.ts | 55 +- .../cloud/compute/v1/snapshot_schedule.ts | 50 +- .../compute/v1/snapshot_schedule_service.ts | 344 +- .../cloud/compute/v1/snapshot_service.ts | 30 +- .../yandex/cloud/containerregistry/index.ts | 2 + .../cloud/containerregistry/v1/image.ts | 2 +- .../cloud/containerregistry/v1/repository.ts | 2 +- .../cloud/containerregistry/v1/scan_policy.ts | 521 ++ .../v1/scan_policy_service.ts | 979 +++ .../yandex/cloud/dataproc/v1/cluster.ts | 2 +- .../cloud/dataproc/v1/cluster_service.ts | 2 +- src/generated/yandex/cloud/dataproc/v1/job.ts | 2 +- .../yandex/cloud/datasphere/index.ts | 10 +- .../v1/node_execution_error_details.ts | 152 + .../cloud/datasphere/v1/node_service.ts | 213 + .../yandex/cloud/datasphere/v2/community.ts | 331 + .../cloud/datasphere/v2/community_service.ts | 1682 ++++ .../yandex/cloud/datasphere/v2/dataset.ts | 762 ++ .../yandex/cloud/datasphere/v2/project.ts | 814 ++ .../cloud/datasphere/v2/project_service.ts | 3409 +++++++ .../yandex/cloud/datasphere/v2/secret.ts | 430 + .../yandex/cloud/datasphere/v2/user.ts | 160 + .../yandex/cloud/datatransfer/index.ts | 6 +- .../yandex/cloud/datatransfer/v1/endpoint.ts | 90 + .../datatransfer/v1/endpoint/clickhouse.ts | 16 + .../cloud/datatransfer/v1/endpoint/common.ts | 633 +- .../cloud/datatransfer/v1/endpoint/kafka.ts | 1008 +++ .../cloud/datatransfer/v1/endpoint/mongo.ts | 13 + .../cloud/datatransfer/v1/endpoint/mysql.ts | 87 +- .../cloud/datatransfer/v1/endpoint/parsers.ts | 404 + .../datatransfer/v1/endpoint/postgres.ts | 149 +- .../datatransfer/v1/endpoint/serializers.ts | 450 + .../cloud/datatransfer/v1/endpoint/ydb.ts | 419 + .../cloud/datatransfer/v1/endpoint_service.ts | 27 +- .../yandex/cloud/datatransfer/v1/transfer.ts | 9 + 
.../cloud/datatransfer/v1/transfer_service.ts | 26 +- .../yandex/cloud/dns/v1/dns_zone_service.ts | 238 + .../yandex/cloud/iam/v1/user_account.ts | 2 +- src/generated/yandex/cloud/index.ts | 2 + .../iot/broker/v1/broker_data_service.ts | 6 +- .../yandex/cloud/iot/devices/v1/registry.ts | 179 + .../cloud/iot/devices/v1/registry_service.ts | 735 ++ src/generated/yandex/cloud/k8s/v1/cluster.ts | 303 + .../yandex/cloud/k8s/v1/cluster_service.ts | 258 + src/generated/yandex/cloud/k8s/v1/node.ts | 291 +- src/generated/yandex/cloud/kms/index.ts | 8 +- .../asymmetric_encryption_crypto_service.ts | 577 ++ .../asymmetric_encryption_key.ts | 501 ++ .../asymmetric_encryption_key_service.ts | 1973 +++++ .../asymmetric_signature_crypto_service.ts | 811 ++ .../asymmetric_signature_key.ts | 565 ++ .../asymmetric_signature_key_service.ts | 1967 +++++ .../loadbalancer/v1/network_load_balancer.ts | 18 +- .../v1/network_load_balancer_service.ts | 36 +- .../cloud/loadbalancer/v1/target_group.ts | 2 +- .../cloud/loadtesting/agent/v1/agent.ts | 103 + .../agent/v1/agent_registration_service.ts | 480 + .../loadtesting/agent/v1/agent_service.ts | 370 + .../cloud/loadtesting/agent/v1/job_service.ts | 1484 ++++ .../agent/v1/monitoring_service.ts | 525 ++ .../yandex/cloud/loadtesting/agent/v1/test.ts | 1144 +++ .../loadtesting/agent/v1/test_service.ts | 1193 +++ .../loadtesting/agent/v1/trail_service.ts | 888 ++ .../cloud/loadtesting/api/v1/agent/agent.ts | 207 + .../api/v1/agent/create_compute_instance.ts | 498 ++ .../cloud/loadtesting/api/v1/agent/status.ts | 132 + .../cloud/loadtesting/api/v1/agent_service.ts | 799 ++ .../yandex/cloud/loadtesting/index.ts | 12 + src/generated/yandex/cloud/logging/index.ts | 6 +- .../yandex/cloud/logging/v1/export.ts | 187 + .../yandex/cloud/logging/v1/export_service.ts | 521 ++ .../cloud/logging/v1/log_ingestion_service.ts | 2 +- .../cloud/logging/v1/log_reading_service.ts | 2 +- src/generated/yandex/cloud/logging/v1/sink.ts | 520 ++ 
.../yandex/cloud/logging/v1/sink_service.ts | 1846 ++++ .../yandex/cloud/marketplace/index.ts | 7 + .../marketplace/licensemanager/v1/instance.ts | 389 + .../licensemanager/v1/instance_service.ts | 441 + .../marketplace/licensemanager/v1/lock.ts | 284 + .../licensemanager/v1/lock_service.ts | 813 ++ .../marketplace/licensemanager/v1/template.ts | 327 + .../v1/image_product_usage_service.ts | 354 + .../marketplace/metering/v1/usage_record.ts | 422 + .../yandex/cloud/mdb/clickhouse/v1/backup.ts | 89 + .../yandex/cloud/mdb/clickhouse/v1/cluster.ts | 23 + .../mdb/clickhouse/v1/cluster_service.ts | 314 +- .../mdb/clickhouse/v1/config/clickhouse.ts | 1355 ++- .../yandex/cloud/mdb/clickhouse/v1/user.ts | 2161 ++++- .../cloud/mdb/greenplum/v1/backup_service.ts | 154 + .../yandex/cloud/mdb/greenplum/v1/cluster.ts | 224 + .../cloud/mdb/greenplum/v1/cluster_service.ts | 243 + .../yandex/cloud/mdb/greenplum/v1/config.ts | 2717 +++++- .../yandex/cloud/mdb/greenplum/v1/pxf.ts | 394 + src/generated/yandex/cloud/mdb/index.ts | 21 + .../yandex/cloud/mdb/kafka/v1/cluster.ts | 1339 +-- .../yandex/cloud/mdb/kafka/v1/common.ts | 38 + .../yandex/cloud/mdb/kafka/v1/topic.ts | 16 +- .../yandex/cloud/mdb/kafka/v1/user.ts | 24 + .../yandex/cloud/mdb/mysql/v1/backup.ts | 93 + .../cloud/mdb/mysql/v1/backup_service.ts | 154 + .../yandex/cloud/mdb/mysql/v1/cluster.ts | 14 + .../cloud/mdb/mysql/v1/cluster_service.ts | 15 + .../cloud/mdb/mysql/v1/config/mysql5_7.ts | 438 +- .../cloud/mdb/mysql/v1/config/mysql8_0.ts | 329 +- .../yandex/cloud/mdb/mysql/v1/user.ts | 21 + .../yandex/cloud/mdb/opensearch/v1/auth.ts | 307 + .../yandex/cloud/mdb/opensearch/v1/backup.ts | 280 + .../cloud/mdb/opensearch/v1/backup_service.ts | 434 + .../yandex/cloud/mdb/opensearch/v1/cluster.ts | 2454 ++++++ .../mdb/opensearch/v1/cluster_service.ts | 7809 +++++++++++++++++ .../mdb/opensearch/v1/config/opensearch.ts | 277 + .../cloud/mdb/opensearch/v1/maintenance.ts | 561 ++ .../mdb/opensearch/v1/resource_preset.ts | 170 + 
.../opensearch/v1/resource_preset_service.ts | 479 + .../yandex/cloud/mdb/postgresql/v1/backup.ts | 18 +- .../cloud/mdb/postgresql/v1/backup_service.ts | 185 + .../yandex/cloud/mdb/postgresql/v1/cluster.ts | 403 +- .../mdb/postgresql/v1/cluster_service.ts | 282 +- .../cloud/mdb/postgresql/v1/config/host15.ts | 2028 +++++ .../mdb/postgresql/v1/config/host15_1c.ts | 2037 +++++ .../cloud/mdb/postgresql/v1/config/host16.ts | 2082 +++++ .../mdb/postgresql/v1/config/host16_1c.ts | 2092 +++++ .../mdb/postgresql/v1/config/postgresql10.ts | 182 + .../postgresql/v1/config/postgresql10_1c.ts | 182 + .../mdb/postgresql/v1/config/postgresql11.ts | 259 + .../postgresql/v1/config/postgresql11_1c.ts | 259 + .../mdb/postgresql/v1/config/postgresql12.ts | 259 + .../postgresql/v1/config/postgresql12_1c.ts | 259 + .../mdb/postgresql/v1/config/postgresql13.ts | 259 + .../postgresql/v1/config/postgresql13_1c.ts | 259 + .../mdb/postgresql/v1/config/postgresql14.ts | 259 + .../postgresql/v1/config/postgresql14_1c.ts | 259 + .../mdb/postgresql/v1/config/postgresql15.ts | 4619 ++++++++++ .../postgresql/v1/config/postgresql15_1c.ts | 4684 ++++++++++ .../mdb/postgresql/v1/config/postgresql16.ts | 4627 ++++++++++ .../postgresql/v1/config/postgresql16_1c.ts | 4690 ++++++++++ .../cloud/mdb/postgresql/v1/database.ts | 61 +- .../mdb/postgresql/v1/database_service.ts | 30 + .../cloud/mdb/postgresql/v1/perf_diag.ts | 1643 ++++ .../mdb/postgresql/v1/perf_diag_service.ts | 722 ++ .../yandex/cloud/mdb/postgresql/v1/user.ts | 291 +- .../cloud/mdb/postgresql/v1/user_service.ts | 31 +- .../yandex/cloud/mdb/redis/v1/cluster.ts | 38 + .../cloud/mdb/redis/v1/cluster_service.ts | 72 + .../yandex/cloud/mdb/redis/v1/config/redis.ts | 658 ++ .../cloud/mdb/redis/v1/config/redis6_2.ts | 24 + .../cloud/mdb/redis/v1/config/redis7_0.ts | 24 + src/generated/yandex/cloud/oauth/claims.ts | 66 + .../yandex/cloud/organizationmanager/index.ts | 3 + .../cloud/organizationmanager/v1/group.ts | 2 +- 
.../organizationmanager/v1/group_mapping.ts | 212 + .../v1/group_mapping_service.ts | 1754 ++++ .../organizationmanager/v1/saml/federation.ts | 17 +- .../v1/saml/federation_service.ts | 22 + .../v1/ssh_certificate_service.ts | 344 + .../serverless/apigateway/v1/apigateway.ts | 617 ++ .../apigateway/v1/apigateway_service.ts | 408 +- .../serverless/containers/v1/container.ts | 474 + .../containers/v1/container_service.ts | 330 + .../cloud/serverless/functions/v1/function.ts | 703 +- .../functions/v1/function_service.ts | 304 +- .../cloud/serverless/triggers/v1/trigger.ts | 271 + .../serverless/triggers/v1/trigger_service.ts | 18 + src/generated/yandex/cloud/service_clients.ts | 34 +- .../yandex/cloud/storage/v1/bucket.ts | 651 ++ .../yandex/cloud/storage/v1/bucket_service.ts | 129 +- src/generated/yandex/cloud/vpc/v1/address.ts | 17 + .../yandex/cloud/vpc/v1/address_service.ts | 34 + src/generated/yandex/cloud/vpc/v1/gateway.ts | 2 +- src/generated/yandex/cloud/vpc/v1/subnet.ts | 2 +- src/generated/yandex/cloud/ydb/v1/database.ts | 15 + 218 files changed, 120384 insertions(+), 2627 deletions(-) create mode 100644 src/generated/google/rpc/code.ts create mode 100644 src/generated/yandex/cloud/ai/llm/v1alpha/llm.ts create mode 100644 src/generated/yandex/cloud/ai/llm/v1alpha/llm_service.ts create mode 100644 src/generated/yandex/cloud/ai/ocr/v1/ocr.ts create mode 100644 src/generated/yandex/cloud/ai/ocr/v1/ocr_service.ts create mode 100644 src/generated/yandex/cloud/apploadbalancer/v1/logging.ts create mode 100644 src/generated/yandex/cloud/backup/index.ts create mode 100644 src/generated/yandex/cloud/backup/v1/backup.ts create mode 100644 src/generated/yandex/cloud/backup/v1/backup_service.ts create mode 100644 src/generated/yandex/cloud/backup/v1/policy.ts create mode 100644 src/generated/yandex/cloud/backup/v1/policy_service.ts create mode 100644 src/generated/yandex/cloud/backup/v1/provider_service.ts create mode 100644 
src/generated/yandex/cloud/backup/v1/resource.ts create mode 100644 src/generated/yandex/cloud/backup/v1/resource_service.ts create mode 100644 src/generated/yandex/cloud/compute/v1/gpu_cluster.ts create mode 100644 src/generated/yandex/cloud/compute/v1/gpu_cluster_service.ts create mode 100644 src/generated/yandex/cloud/containerregistry/v1/scan_policy.ts create mode 100644 src/generated/yandex/cloud/containerregistry/v1/scan_policy_service.ts create mode 100644 src/generated/yandex/cloud/datasphere/v1/node_execution_error_details.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/community.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/community_service.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/dataset.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/project.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/project_service.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/secret.ts create mode 100644 src/generated/yandex/cloud/datasphere/v2/user.ts create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/kafka.ts create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/parsers.ts create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/serializers.ts create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/ydb.ts create mode 100644 src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_crypto_service.ts create mode 100644 src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key.ts create mode 100644 src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key_service.ts create mode 100644 src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_crypto_service.ts create mode 100644 src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key.ts create mode 100644 
src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/agent.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/agent_registration_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/agent_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/job_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/monitoring_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/test.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/test_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/agent/v1/trail_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/api/v1/agent/agent.ts create mode 100644 src/generated/yandex/cloud/loadtesting/api/v1/agent/create_compute_instance.ts create mode 100644 src/generated/yandex/cloud/loadtesting/api/v1/agent/status.ts create mode 100644 src/generated/yandex/cloud/loadtesting/api/v1/agent_service.ts create mode 100644 src/generated/yandex/cloud/loadtesting/index.ts create mode 100644 src/generated/yandex/cloud/logging/v1/export.ts create mode 100644 src/generated/yandex/cloud/logging/v1/export_service.ts create mode 100644 src/generated/yandex/cloud/logging/v1/sink.ts create mode 100644 src/generated/yandex/cloud/logging/v1/sink_service.ts create mode 100644 src/generated/yandex/cloud/marketplace/licensemanager/v1/instance.ts create mode 100644 src/generated/yandex/cloud/marketplace/licensemanager/v1/instance_service.ts create mode 100644 src/generated/yandex/cloud/marketplace/licensemanager/v1/lock.ts create mode 100644 src/generated/yandex/cloud/marketplace/licensemanager/v1/lock_service.ts create mode 100644 src/generated/yandex/cloud/marketplace/licensemanager/v1/template.ts create mode 100644 src/generated/yandex/cloud/marketplace/metering/v1/image_product_usage_service.ts 
create mode 100644 src/generated/yandex/cloud/marketplace/metering/v1/usage_record.ts create mode 100644 src/generated/yandex/cloud/mdb/greenplum/v1/pxf.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/auth.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/backup.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/backup_service.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/cluster.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/cluster_service.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/config/opensearch.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/maintenance.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset.ts create mode 100644 src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset_service.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host15.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host15_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host16.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host16_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag_service.ts create mode 100644 src/generated/yandex/cloud/mdb/redis/v1/config/redis.ts create mode 100644 src/generated/yandex/cloud/organizationmanager/v1/group_mapping.ts create mode 100644 src/generated/yandex/cloud/organizationmanager/v1/group_mapping_service.ts create mode 100644 
src/generated/yandex/cloud/organizationmanager/v1/ssh_certificate_service.ts diff --git a/cloudapi b/cloudapi index a321f934..1023048f 160000 --- a/cloudapi +++ b/cloudapi @@ -1 +1 @@ -Subproject commit a321f9341d37fcbc849a4b38f7e659724a051ff1 +Subproject commit 1023048f50d4bcfc296e178b40529305ec5443e1 diff --git a/scripts/services.ts b/scripts/services.ts index eea8f8f6..103a9bff 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -8,6 +8,7 @@ export const servicesConfig: ServicesConfig = { tts_service: { importClassName: 'SynthesizerClient' }, vision_service: { importClassName: 'VisionServiceClient' }, vision_image_classifier_service: { importClassName: 'ImageClassifierServiceClient' }, + ocr_service: { importClassName: 'TextRecognitionServiceClient' }, }, apploadbalancer: { backend_group_service: { importClassName: 'BackendGroupServiceClient' }, @@ -15,6 +16,13 @@ export const servicesConfig: ServicesConfig = { load_balancer_service: { importClassName: 'LoadBalancerServiceClient' }, target_group_service: { importClassName: 'TargetGroupServiceClient', exportClassName: 'AlbTargetGroupServiceClient' }, virtual_host_service: { importClassName: 'VirtualHostServiceClient' }, + resource_service: { importClassName: 'ResourceServiceClient' }, + }, + backup: { + backup_service: { importClassName: 'BackupServiceClient' }, + policy_service: { importClassName: 'PolicyServiceClient' }, + provider_service: { importClassName: 'ProviderServiceClient' }, + resource_service: { importClassName: 'ResourceServiceClient' }, }, billing: { billing_account_service: { importClassName: 'BillingAccountServiceClient' }, @@ -49,6 +57,7 @@ export const servicesConfig: ServicesConfig = { zone_service: { importClassName: 'ZoneServiceClient' }, instance_group_service: { importClassName: 'InstanceGroupServiceClient' }, snapshot_schedule_service: { importClassName: 'SnapshotScheduleServiceClient' }, + gpu_cluster_service: { importClassName: 'GpuClusterServiceClient' }, }, 
containerregistry: { image_service: { importClassName: 'ImageServiceClient', exportClassName: 'CrImageServiceClient' }, @@ -56,6 +65,7 @@ export const servicesConfig: ServicesConfig = { registry_service: { importClassName: 'RegistryServiceClient' }, repository_service: { importClassName: 'RepositoryServiceClient' }, scanner_service: { importClassName: 'ScannerServiceClient' }, + scan_policy_service: { importClassName: 'ScanPolicyServiceClient' }, }, dataproc: { cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'DataProcClusterServiceClient' }, @@ -71,6 +81,7 @@ export const servicesConfig: ServicesConfig = { node_service: { importClassName: 'NodeServiceClient' }, project_data_service: { importClassName: 'ProjectDataServiceClient' }, project_service: { importClassName: 'ProjectServiceClient' }, + community_service: { importClassName: 'CommunityServiceClient' }, }, datatransfer: { endpoint_service: { importClassName: 'EndpointServiceClient' }, @@ -106,6 +117,20 @@ export const servicesConfig: ServicesConfig = { kms: { symmetric_crypto_service: { importClassName: 'SymmetricCryptoServiceClient' }, symmetric_key_service: { importClassName: 'SymmetricKeyServiceClient' }, + asymmetric_encryption_crypto_service: { importClassName: 'AsymmetricEncryptionCryptoServiceClient' }, + asymmetric_encryption_key_service: { importClassName: 'AsymmetricEncryptionKeyServiceClient' }, + asymmetric_signature_crypto_service: { importClassName: 'AsymmetricSignatureCryptoServiceClient' }, + asymmetric_signature_key_service: { importClassName: 'AsymmetricSignatureKeyServiceClient' }, + + }, + loadtesting: { + agent_agent_registration_service: { importClassName: 'AgentRegistrationServiceClient' }, + agent_service: { importClassName: 'AgentServiceClient' }, + agent_job_service: { importClassName: 'JobServiceClient' }, + agent_monitoring_service: { importClassName: 'MonitoringServiceClient' }, + agent_test_service: { importClassName: 'TestServiceClient' }, + 
agent_trail_service: { importClassName: 'TrailServiceClient' }, + api_agent_service: { importClassName: 'AgentServiceClient' }, }, loadbalancer: { network_load_balancer_service: { importClassName: 'NetworkLoadBalancerServiceClient' }, @@ -119,9 +144,14 @@ export const servicesConfig: ServicesConfig = { log_group_service: { importClassName: 'LogGroupServiceClient' }, log_ingestion_service: { importClassName: 'LogIngestionServiceClient' }, log_reading_service: { importClassName: 'LogReadingServiceClient' }, + export_service: { importClassName: 'ExportServiceClient' }, + sink_service: { importClassName: 'SinkServiceClient' }, }, marketplace: { image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient' }, + licensemanager_instance_service: { importClassName: 'InstanceServiceClient' }, + licensemanager_lock_service: { importClassName: 'LockServiceClient' }, + metering_image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient' }, }, mdb: { clickhouse_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'ClickHouseBackupServiceClient' }, @@ -156,11 +186,15 @@ export const servicesConfig: ServicesConfig = { mysql_database_service: { importClassName: 'DatabaseServiceClient', exportClassName: 'MysqlDatabaseServiceClient' }, mysql_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'MysqlResourcePresetServiceClient' }, mysql_user_service: { importClassName: 'UserServiceClient', exportClassName: 'MysqlUserServiceClient' }, + opensearch_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'OpenSearchBackupServiceClient' }, + opensearch_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'OpenSearchClusterServiceClient' }, + opensearch_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'OpenSearchResourcePresetServiceClient' }, postgresql_backup_service: { importClassName: 
'BackupServiceClient', exportClassName: 'PgsqlBackupServiceClient' }, postgresql_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'PgsqlClusterServiceClient' }, postgresql_database_service: { importClassName: 'DatabaseServiceClient', exportClassName: 'PgsqlDatabaseServiceClient' }, postgresql_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'PgsqlResourcePresetServiceClient' }, postgresql_user_service: { importClassName: 'UserServiceClient', exportClassName: 'PgsqlUserServiceClient' }, + postgresql_perf_diag_service: { importClassName: 'PerformanceDiagnosticsServiceClient', exportClassName: 'PgsqlPerformanceDiagnosticsServiceClient' }, redis_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'RedisBackupServiceClient' }, redis_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'RedisClusterServiceClient' }, redis_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'RedisResourcePresetServiceClient' }, @@ -182,6 +216,8 @@ export const servicesConfig: ServicesConfig = { user_service: { importClassName: 'UserServiceClient' }, certificate_service: { importClassName: 'CertificateServiceClient', exportClassName: 'OmCertificateServiceClient' }, federation_service: { importClassName: 'FederationServiceClient' }, + group_mapping_service: { importClassName: 'GroupMappingServiceClient' }, + ssh_certificate_service: { importClassName: 'SshCertificateServiceClient' }, }, resourcemanager: { cloud_service: { importClassName: 'CloudServiceClient' }, diff --git a/src/generated/google/rpc/code.ts b/src/generated/google/rpc/code.ts new file mode 100644 index 00000000..31a20a4b --- /dev/null +++ b/src/generated/google/rpc/code.ts @@ -0,0 +1,294 @@ +/* eslint-disable */ +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "google.rpc"; + +/** + * The canonical error codes for Google 
APIs. + * + * + * Sometimes multiple error codes may apply. Services should return + * the most specific error code that applies. For example, prefer + * `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. + * Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. + */ +export enum Code { + /** + * OK - Not an error; returned on success + * + * HTTP Mapping: 200 OK + */ + OK = 0, + /** + * CANCELLED - The operation was cancelled, typically by the caller. + * + * HTTP Mapping: 499 Client Closed Request + */ + CANCELLED = 1, + /** + * UNKNOWN - Unknown error. For example, this error may be returned when + * a `Status` value received from another address space belongs to + * an error space that is not known in this address space. Also + * errors raised by APIs that do not return enough error information + * may be converted to this error. + * + * HTTP Mapping: 500 Internal Server Error + */ + UNKNOWN = 2, + /** + * INVALID_ARGUMENT - The client specified an invalid argument. Note that this differs + * from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + * that are problematic regardless of the state of the system + * (e.g., a malformed file name). + * + * HTTP Mapping: 400 Bad Request + */ + INVALID_ARGUMENT = 3, + /** + * DEADLINE_EXCEEDED - The deadline expired before the operation could complete. For operations + * that change the state of the system, this error may be returned + * even if the operation has completed successfully. For example, a + * successful response from a server could have been delayed long + * enough for the deadline to expire. + * + * HTTP Mapping: 504 Gateway Timeout + */ + DEADLINE_EXCEEDED = 4, + /** + * NOT_FOUND - Some requested entity (e.g., file or directory) was not found. + * + * Note to server developers: if a request is denied for an entire class + * of users, such as gradual feature rollout or undocumented whitelist, + * `NOT_FOUND` may be used. 
If a request is denied for some users within + * a class of users, such as user-based access control, `PERMISSION_DENIED` + * must be used. + * + * HTTP Mapping: 404 Not Found + */ + NOT_FOUND = 5, + /** + * ALREADY_EXISTS - The entity that a client attempted to create (e.g., file or directory) + * already exists. + * + * HTTP Mapping: 409 Conflict + */ + ALREADY_EXISTS = 6, + /** + * PERMISSION_DENIED - The caller does not have permission to execute the specified + * operation. `PERMISSION_DENIED` must not be used for rejections + * caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + * instead for those errors). `PERMISSION_DENIED` must not be + * used if the caller can not be identified (use `UNAUTHENTICATED` + * instead for those errors). This error code does not imply the + * request is valid or the requested entity exists or satisfies + * other pre-conditions. + * + * HTTP Mapping: 403 Forbidden + */ + PERMISSION_DENIED = 7, + /** + * UNAUTHENTICATED - The request does not have valid authentication credentials for the + * operation. + * + * HTTP Mapping: 401 Unauthorized + */ + UNAUTHENTICATED = 16, + /** + * RESOURCE_EXHAUSTED - Some resource has been exhausted, perhaps a per-user quota, or + * perhaps the entire file system is out of space. + * + * HTTP Mapping: 429 Too Many Requests + */ + RESOURCE_EXHAUSTED = 8, + /** + * FAILED_PRECONDITION - The operation was rejected because the system is not in a state + * required for the operation's execution. For example, the directory + * to be deleted is non-empty, an rmdir operation is applied to + * a non-directory, etc. + * + * Service implementors can use the following guidelines to decide + * between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + * (a) Use `UNAVAILABLE` if the client can retry just the failing call. 
+ * (b) Use `ABORTED` if the client should retry at a higher level + * (e.g., when a client-specified test-and-set fails, indicating the + * client should restart a read-modify-write sequence). + * (c) Use `FAILED_PRECONDITION` if the client should not retry until + * the system state has been explicitly fixed. E.g., if an "rmdir" + * fails because the directory is non-empty, `FAILED_PRECONDITION` + * should be returned since the client should not retry unless + * the files are deleted from the directory. + * + * HTTP Mapping: 400 Bad Request + */ + FAILED_PRECONDITION = 9, + /** + * ABORTED - The operation was aborted, typically due to a concurrency issue such as + * a sequencer check failure or transaction abort. + * + * See the guidelines above for deciding between `FAILED_PRECONDITION`, + * `ABORTED`, and `UNAVAILABLE`. + * + * HTTP Mapping: 409 Conflict + */ + ABORTED = 10, + /** + * OUT_OF_RANGE - The operation was attempted past the valid range. E.g., seeking or + * reading past end-of-file. + * + * Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + * be fixed if the system state changes. For example, a 32-bit file + * system will generate `INVALID_ARGUMENT` if asked to read at an + * offset that is not in the range [0,2^32-1], but it will generate + * `OUT_OF_RANGE` if asked to read from an offset past the current + * file size. + * + * There is a fair bit of overlap between `FAILED_PRECONDITION` and + * `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + * error) when it applies so that callers who are iterating through + * a space can easily look for an `OUT_OF_RANGE` error to detect when + * they are done. + * + * HTTP Mapping: 400 Bad Request + */ + OUT_OF_RANGE = 11, + /** + * UNIMPLEMENTED - The operation is not implemented or is not supported/enabled in this + * service. + * + * HTTP Mapping: 501 Not Implemented + */ + UNIMPLEMENTED = 12, + /** + * INTERNAL - Internal errors. 
This means that some invariants expected by the + * underlying system have been broken. This error code is reserved + * for serious errors. + * + * HTTP Mapping: 500 Internal Server Error + */ + INTERNAL = 13, + /** + * UNAVAILABLE - The service is currently unavailable. This is most likely a + * transient condition, which can be corrected by retrying with + * a backoff. + * + * See the guidelines above for deciding between `FAILED_PRECONDITION`, + * `ABORTED`, and `UNAVAILABLE`. + * + * HTTP Mapping: 503 Service Unavailable + */ + UNAVAILABLE = 14, + /** + * DATA_LOSS - Unrecoverable data loss or corruption. + * + * HTTP Mapping: 500 Internal Server Error + */ + DATA_LOSS = 15, + UNRECOGNIZED = -1, +} + +export function codeFromJSON(object: any): Code { + switch (object) { + case 0: + case "OK": + return Code.OK; + case 1: + case "CANCELLED": + return Code.CANCELLED; + case 2: + case "UNKNOWN": + return Code.UNKNOWN; + case 3: + case "INVALID_ARGUMENT": + return Code.INVALID_ARGUMENT; + case 4: + case "DEADLINE_EXCEEDED": + return Code.DEADLINE_EXCEEDED; + case 5: + case "NOT_FOUND": + return Code.NOT_FOUND; + case 6: + case "ALREADY_EXISTS": + return Code.ALREADY_EXISTS; + case 7: + case "PERMISSION_DENIED": + return Code.PERMISSION_DENIED; + case 16: + case "UNAUTHENTICATED": + return Code.UNAUTHENTICATED; + case 8: + case "RESOURCE_EXHAUSTED": + return Code.RESOURCE_EXHAUSTED; + case 9: + case "FAILED_PRECONDITION": + return Code.FAILED_PRECONDITION; + case 10: + case "ABORTED": + return Code.ABORTED; + case 11: + case "OUT_OF_RANGE": + return Code.OUT_OF_RANGE; + case 12: + case "UNIMPLEMENTED": + return Code.UNIMPLEMENTED; + case 13: + case "INTERNAL": + return Code.INTERNAL; + case 14: + case "UNAVAILABLE": + return Code.UNAVAILABLE; + case 15: + case "DATA_LOSS": + return Code.DATA_LOSS; + case -1: + case "UNRECOGNIZED": + default: + return Code.UNRECOGNIZED; + } +} + +export function codeToJSON(object: Code): string { + switch (object) { + case Code.OK: + 
return "OK"; + case Code.CANCELLED: + return "CANCELLED"; + case Code.UNKNOWN: + return "UNKNOWN"; + case Code.INVALID_ARGUMENT: + return "INVALID_ARGUMENT"; + case Code.DEADLINE_EXCEEDED: + return "DEADLINE_EXCEEDED"; + case Code.NOT_FOUND: + return "NOT_FOUND"; + case Code.ALREADY_EXISTS: + return "ALREADY_EXISTS"; + case Code.PERMISSION_DENIED: + return "PERMISSION_DENIED"; + case Code.UNAUTHENTICATED: + return "UNAUTHENTICATED"; + case Code.RESOURCE_EXHAUSTED: + return "RESOURCE_EXHAUSTED"; + case Code.FAILED_PRECONDITION: + return "FAILED_PRECONDITION"; + case Code.ABORTED: + return "ABORTED"; + case Code.OUT_OF_RANGE: + return "OUT_OF_RANGE"; + case Code.UNIMPLEMENTED: + return "UNIMPLEMENTED"; + case Code.INTERNAL: + return "INTERNAL"; + case Code.UNAVAILABLE: + return "UNAVAILABLE"; + case Code.DATA_LOSS: + return "DATA_LOSS"; + default: + return "UNKNOWN"; + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/index.ts b/src/generated/yandex/cloud/ai/index.ts index 972cfc79..6d0e8bb5 100644 --- a/src/generated/yandex/cloud/ai/index.ts +++ b/src/generated/yandex/cloud/ai/index.ts @@ -1,3 +1,5 @@ +export * as ocr from './ocr/v1/ocr' +export * as ocr_service from './ocr/v1/ocr_service' export * as stt_service from './stt/v2/stt_service' export * as translate_translation from './translate/v2/translation' export * as translate_translation_service from './translate/v2/translation_service' diff --git a/src/generated/yandex/cloud/ai/llm/v1alpha/llm.ts b/src/generated/yandex/cloud/ai/llm/v1alpha/llm.ts new file mode 100644 index 00000000..ddf35ab2 --- /dev/null +++ b/src/generated/yandex/cloud/ai/llm/v1alpha/llm.ts @@ -0,0 +1,448 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, +} from "../../../../../google/protobuf/wrappers"; + +export 
const protobufPackage = "yandex.cloud.ai.llm.v1alpha"; + +/** Defines the options for text generation. */ +export interface GenerationOptions { + $type: "yandex.cloud.ai.llm.v1alpha.GenerationOptions"; + /** Enables streaming of partially generated text. */ + partialResults: boolean; + /** + * Affects creativity and randomness of responses. Should be a double number between 0 (inclusive) and infinity. + * Lower values produce more straightforward responses, while higher values lead to increased creativity and randomness. + */ + temperature?: number; + /** + * Sets the maximum limit on the total number of tokens used for both the input prompt and the generated response. + * Must be greater than zero and not exceed 7400 tokens. + */ + maxTokens?: number; +} + +/** Represents an alternative generated response, including its score and token count. */ +export interface Alternative { + $type: "yandex.cloud.ai.llm.v1alpha.Alternative"; + /** The generated text response. */ + text: string; + /** The score or confidence of the generated text. */ + score: number; + /** The number of tokens in the generated response. */ + numTokens: number; +} + +/** Represents a message within a chat. */ +export interface Message { + $type: "yandex.cloud.ai.llm.v1alpha.Message"; + /** Identifies the sender of the message. */ + role: string; + /** The text content of the message. */ + text: string; +} + +/** Represents a token, the basic unit of text, used by the LLM. */ +export interface Token { + $type: "yandex.cloud.ai.llm.v1alpha.Token"; + /** An internal token identifier. */ + id: number; + /** The textual representation of the token. */ + text: string; + /** Indicates whether the token is special or not. Special tokens define the model's behavior and are not visible to users. 
*/ + special: boolean; +} + +const baseGenerationOptions: object = { + $type: "yandex.cloud.ai.llm.v1alpha.GenerationOptions", + partialResults: false, +}; + +export const GenerationOptions = { + $type: "yandex.cloud.ai.llm.v1alpha.GenerationOptions" as const, + + encode( + message: GenerationOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.partialResults === true) { + writer.uint32(8).bool(message.partialResults); + } + if (message.temperature !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.temperature! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.maxTokens !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxTokens! }, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenerationOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGenerationOptions } as GenerationOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.partialResults = reader.bool(); + break; + case 2: + message.temperature = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.maxTokens = Int64Value.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GenerationOptions { + const message = { ...baseGenerationOptions } as GenerationOptions; + message.partialResults = + object.partialResults !== undefined && object.partialResults !== null + ? Boolean(object.partialResults) + : false; + message.temperature = + object.temperature !== undefined && object.temperature !== null + ? 
Number(object.temperature) + : undefined; + message.maxTokens = + object.maxTokens !== undefined && object.maxTokens !== null + ? Number(object.maxTokens) + : undefined; + return message; + }, + + toJSON(message: GenerationOptions): unknown { + const obj: any = {}; + message.partialResults !== undefined && + (obj.partialResults = message.partialResults); + message.temperature !== undefined && + (obj.temperature = message.temperature); + message.maxTokens !== undefined && (obj.maxTokens = message.maxTokens); + return obj; + }, + + fromPartial, I>>( + object: I + ): GenerationOptions { + const message = { ...baseGenerationOptions } as GenerationOptions; + message.partialResults = object.partialResults ?? false; + message.temperature = object.temperature ?? undefined; + message.maxTokens = object.maxTokens ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(GenerationOptions.$type, GenerationOptions); + +const baseAlternative: object = { + $type: "yandex.cloud.ai.llm.v1alpha.Alternative", + text: "", + score: 0, + numTokens: 0, +}; + +export const Alternative = { + $type: "yandex.cloud.ai.llm.v1alpha.Alternative" as const, + + encode( + message: Alternative, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); + } + if (message.score !== 0) { + writer.uint32(17).double(message.score); + } + if (message.numTokens !== 0) { + writer.uint32(24).int64(message.numTokens); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Alternative { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAlternative } as Alternative; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.text = reader.string(); + break; + case 2: + message.score = reader.double(); + break; + case 3: + message.numTokens = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Alternative { + const message = { ...baseAlternative } as Alternative; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.score = + object.score !== undefined && object.score !== null + ? Number(object.score) + : 0; + message.numTokens = + object.numTokens !== undefined && object.numTokens !== null + ? Number(object.numTokens) + : 0; + return message; + }, + + toJSON(message: Alternative): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + message.score !== undefined && (obj.score = message.score); + message.numTokens !== undefined && + (obj.numTokens = Math.round(message.numTokens)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Alternative { + const message = { ...baseAlternative } as Alternative; + message.text = object.text ?? ""; + message.score = object.score ?? 0; + message.numTokens = object.numTokens ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Alternative.$type, Alternative); + +const baseMessage: object = { + $type: "yandex.cloud.ai.llm.v1alpha.Message", + role: "", + text: "", +}; + +export const Message = { + $type: "yandex.cloud.ai.llm.v1alpha.Message" as const, + + encode( + message: Message, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.role !== "") { + writer.uint32(10).string(message.role); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Message { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMessage } as Message; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.role = reader.string(); + break; + case 2: + message.text = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Message { + const message = { ...baseMessage } as Message; + message.role = + object.role !== undefined && object.role !== null + ? String(object.role) + : ""; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + return message; + }, + + toJSON(message: Message): unknown { + const obj: any = {}; + message.role !== undefined && (obj.role = message.role); + message.text !== undefined && (obj.text = message.text); + return obj; + }, + + fromPartial, I>>(object: I): Message { + const message = { ...baseMessage } as Message; + message.role = object.role ?? ""; + message.text = object.text ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Message.$type, Message); + +const baseToken: object = { + $type: "yandex.cloud.ai.llm.v1alpha.Token", + id: 0, + text: "", + special: false, +}; + +export const Token = { + $type: "yandex.cloud.ai.llm.v1alpha.Token" as const, + + encode(message: Token, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== 0) { + writer.uint32(8).int64(message.id); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + if (message.special === true) { + writer.uint32(24).bool(message.special); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Token { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseToken } as Token; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = longToNumber(reader.int64() as Long); + break; + case 2: + message.text = reader.string(); + break; + case 3: + message.special = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Token { + const message = { ...baseToken } as Token; + message.id = + object.id !== undefined && object.id !== null ? Number(object.id) : 0; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.special = + object.special !== undefined && object.special !== null + ? 
Boolean(object.special) + : false; + return message; + }, + + toJSON(message: Token): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = Math.round(message.id)); + message.text !== undefined && (obj.text = message.text); + message.special !== undefined && (obj.special = message.special); + return obj; + }, + + fromPartial, I>>(object: I): Token { + const message = { ...baseToken } as Token; + message.id = object.id ?? 0; + message.text = object.text ?? ""; + message.special = object.special ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Token.$type, Token); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/llm/v1alpha/llm_service.ts b/src/generated/yandex/cloud/ai/llm/v1alpha/llm_service.ts new file mode 100644 index 00000000..f007422a --- /dev/null +++ b/src/generated/yandex/cloud/ai/llm/v1alpha/llm_service.ts @@ -0,0 +1,1178 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleServerStreamingCall, + Client, + CallOptions, + ClientReadableStream, + Metadata, + handleUnaryCall, + ClientUnaryCall, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + GenerationOptions, + Message, + Alternative, + Token, +} from "../../../../../yandex/cloud/ai/llm/v1alpha/llm"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.ai.llm.v1alpha"; + +/** Request for instructing the model to generate text. */ +export interface InstructRequest { + $type: "yandex.cloud.ai.llm.v1alpha.InstructRequest"; + /** + * The name or identifier of the model to be used for text generation. + * Possible value for now: `general`. + */ + model: string; + /** Configuration options for text generation. */ + generationOptions?: GenerationOptions; + /** The text-based instruction for text generation. */ + instructionText: string | undefined; + /** A URI containing instructions for text generation. */ + instructionUri: string | undefined; + /** The text-based request for text generation. 
*/ + requestText: string | undefined; +} + +/** Response containing generated text alternatives and token count. */ +export interface InstructResponse { + $type: "yandex.cloud.ai.llm.v1alpha.InstructResponse"; + /** A list of alternative text responses. */ + alternatives: Alternative[]; + /** The number of tokens used in the prompt, including both the [instruction_text] and [request_text]. */ + numPromptTokens: number; +} + +/** Request to engage in a chat conversation with a text generation model. */ +export interface ChatRequest { + $type: "yandex.cloud.ai.llm.v1alpha.ChatRequest"; + /** + * The name or identifier of the model to be used for the chat. + * Possible value for now: `general`. + */ + model: string; + /** Configuration options for text generation. */ + generationOptions?: GenerationOptions; + /** The text-based instruction for the conversation. */ + instructionText: string | undefined; + /** A list of messages in the conversation. */ + messages: Message[]; +} + +/** Contains a model-generated response for a chat query. */ +export interface ChatResponse { + $type: "yandex.cloud.ai.llm.v1alpha.ChatResponse"; + /** The assistant's message in the chat conversation. */ + message?: Message; + /** Total number of tokens used in both the chat request and chat response. */ + numTokens: number; +} + +/** Request to tokenize input text. */ +export interface TokenizeRequest { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeRequest"; + /** + * The name or identifier of the model to be used for tokenization. + * Possible values for now: `general`, `general:embedding`. + */ + model: string; + /** The input text to tokenize. */ + text: string; +} + +/** Tokenization response. */ +export interface TokenizeResponse { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeResponse"; + /** A list of tokens obtained from tokenization. */ + tokens: Token[]; +} + +/** Represents a request to obtain embeddings for text data. 
*/ +export interface EmbeddingRequest { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingRequest"; + /** The type of embedding to be generated. */ + embeddingType: EmbeddingRequest_EmbeddingType; + /** The name or identifier of the model to be used for embedding. Possible value for now: `general:embedding`. */ + model: string; + /** The input text for which the embedding is requested. */ + text: string; +} + +/** Enum to specify the type of embedding to be generated. */ +export enum EmbeddingRequest_EmbeddingType { + /** EMBEDDING_TYPE_UNSPECIFIED - Unspecified embedding type. */ + EMBEDDING_TYPE_UNSPECIFIED = 0, + /** + * EMBEDDING_TYPE_QUERY - Embedding for a query. Use this when you have a short query or search term + * that you want to obtain an embedding for. Query embeddings are typically + * used in information retrieval and search applications. + */ + EMBEDDING_TYPE_QUERY = 1, + /** + * EMBEDDING_TYPE_DOCUMENT - Embedding for a document. Use this when you have a longer document or a piece + * of text that you want to obtain an embedding for. Document embeddings are often + * used in natural language understanding and document similarity tasks. 
+ */ + EMBEDDING_TYPE_DOCUMENT = 2, + UNRECOGNIZED = -1, +} + +export function embeddingRequest_EmbeddingTypeFromJSON( + object: any +): EmbeddingRequest_EmbeddingType { + switch (object) { + case 0: + case "EMBEDDING_TYPE_UNSPECIFIED": + return EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_UNSPECIFIED; + case 1: + case "EMBEDDING_TYPE_QUERY": + return EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_QUERY; + case 2: + case "EMBEDDING_TYPE_DOCUMENT": + return EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_DOCUMENT; + case -1: + case "UNRECOGNIZED": + default: + return EmbeddingRequest_EmbeddingType.UNRECOGNIZED; + } +} + +export function embeddingRequest_EmbeddingTypeToJSON( + object: EmbeddingRequest_EmbeddingType +): string { + switch (object) { + case EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_UNSPECIFIED: + return "EMBEDDING_TYPE_UNSPECIFIED"; + case EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_QUERY: + return "EMBEDDING_TYPE_QUERY"; + case EmbeddingRequest_EmbeddingType.EMBEDDING_TYPE_DOCUMENT: + return "EMBEDDING_TYPE_DOCUMENT"; + default: + return "UNKNOWN"; + } +} + +/** Represents a response containing embeddings for input text data. */ +export interface EmbeddingResponse { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingResponse"; + /** A repeated list of double values representing the embedding. */ + embedding: number[]; + /** The number of tokens in the input text. 
*/ + numTokens: number; +} + +const baseInstructRequest: object = { + $type: "yandex.cloud.ai.llm.v1alpha.InstructRequest", + model: "", +}; + +export const InstructRequest = { + $type: "yandex.cloud.ai.llm.v1alpha.InstructRequest" as const, + + encode( + message: InstructRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } + if (message.generationOptions !== undefined) { + GenerationOptions.encode( + message.generationOptions, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.instructionText !== undefined) { + writer.uint32(26).string(message.instructionText); + } + if (message.instructionUri !== undefined) { + writer.uint32(42).string(message.instructionUri); + } + if (message.requestText !== undefined) { + writer.uint32(34).string(message.requestText); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): InstructRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseInstructRequest } as InstructRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; + case 2: + message.generationOptions = GenerationOptions.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.instructionText = reader.string(); + break; + case 5: + message.instructionUri = reader.string(); + break; + case 4: + message.requestText = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): InstructRequest { + const message = { ...baseInstructRequest } as InstructRequest; + message.model = + object.model !== undefined && object.model !== null + ? 
String(object.model) + : ""; + message.generationOptions = + object.generationOptions !== undefined && + object.generationOptions !== null + ? GenerationOptions.fromJSON(object.generationOptions) + : undefined; + message.instructionText = + object.instructionText !== undefined && object.instructionText !== null + ? String(object.instructionText) + : undefined; + message.instructionUri = + object.instructionUri !== undefined && object.instructionUri !== null + ? String(object.instructionUri) + : undefined; + message.requestText = + object.requestText !== undefined && object.requestText !== null + ? String(object.requestText) + : undefined; + return message; + }, + + toJSON(message: InstructRequest): unknown { + const obj: any = {}; + message.model !== undefined && (obj.model = message.model); + message.generationOptions !== undefined && + (obj.generationOptions = message.generationOptions + ? GenerationOptions.toJSON(message.generationOptions) + : undefined); + message.instructionText !== undefined && + (obj.instructionText = message.instructionText); + message.instructionUri !== undefined && + (obj.instructionUri = message.instructionUri); + message.requestText !== undefined && + (obj.requestText = message.requestText); + return obj; + }, + + fromPartial, I>>( + object: I + ): InstructRequest { + const message = { ...baseInstructRequest } as InstructRequest; + message.model = object.model ?? ""; + message.generationOptions = + object.generationOptions !== undefined && + object.generationOptions !== null + ? GenerationOptions.fromPartial(object.generationOptions) + : undefined; + message.instructionText = object.instructionText ?? undefined; + message.instructionUri = object.instructionUri ?? undefined; + message.requestText = object.requestText ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(InstructRequest.$type, InstructRequest); + +const baseInstructResponse: object = { + $type: "yandex.cloud.ai.llm.v1alpha.InstructResponse", + numPromptTokens: 0, +}; + +export const InstructResponse = { + $type: "yandex.cloud.ai.llm.v1alpha.InstructResponse" as const, + + encode( + message: InstructResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.alternatives) { + Alternative.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.numPromptTokens !== 0) { + writer.uint32(16).int64(message.numPromptTokens); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): InstructResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseInstructResponse } as InstructResponse; + message.alternatives = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alternatives.push( + Alternative.decode(reader, reader.uint32()) + ); + break; + case 2: + message.numPromptTokens = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): InstructResponse { + const message = { ...baseInstructResponse } as InstructResponse; + message.alternatives = (object.alternatives ?? []).map((e: any) => + Alternative.fromJSON(e) + ); + message.numPromptTokens = + object.numPromptTokens !== undefined && object.numPromptTokens !== null + ? Number(object.numPromptTokens) + : 0; + return message; + }, + + toJSON(message: InstructResponse): unknown { + const obj: any = {}; + if (message.alternatives) { + obj.alternatives = message.alternatives.map((e) => + e ? 
Alternative.toJSON(e) : undefined + ); + } else { + obj.alternatives = []; + } + message.numPromptTokens !== undefined && + (obj.numPromptTokens = Math.round(message.numPromptTokens)); + return obj; + }, + + fromPartial, I>>( + object: I + ): InstructResponse { + const message = { ...baseInstructResponse } as InstructResponse; + message.alternatives = + object.alternatives?.map((e) => Alternative.fromPartial(e)) || []; + message.numPromptTokens = object.numPromptTokens ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(InstructResponse.$type, InstructResponse); + +const baseChatRequest: object = { + $type: "yandex.cloud.ai.llm.v1alpha.ChatRequest", + model: "", +}; + +export const ChatRequest = { + $type: "yandex.cloud.ai.llm.v1alpha.ChatRequest" as const, + + encode( + message: ChatRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } + if (message.generationOptions !== undefined) { + GenerationOptions.encode( + message.generationOptions, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.instructionText !== undefined) { + writer.uint32(26).string(message.instructionText); + } + for (const v of message.messages) { + Message.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ChatRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseChatRequest } as ChatRequest; + message.messages = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; + case 2: + message.generationOptions = GenerationOptions.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.instructionText = reader.string(); + break; + case 4: + message.messages.push(Message.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChatRequest { + const message = { ...baseChatRequest } as ChatRequest; + message.model = + object.model !== undefined && object.model !== null + ? String(object.model) + : ""; + message.generationOptions = + object.generationOptions !== undefined && + object.generationOptions !== null + ? GenerationOptions.fromJSON(object.generationOptions) + : undefined; + message.instructionText = + object.instructionText !== undefined && object.instructionText !== null + ? String(object.instructionText) + : undefined; + message.messages = (object.messages ?? []).map((e: any) => + Message.fromJSON(e) + ); + return message; + }, + + toJSON(message: ChatRequest): unknown { + const obj: any = {}; + message.model !== undefined && (obj.model = message.model); + message.generationOptions !== undefined && + (obj.generationOptions = message.generationOptions + ? GenerationOptions.toJSON(message.generationOptions) + : undefined); + message.instructionText !== undefined && + (obj.instructionText = message.instructionText); + if (message.messages) { + obj.messages = message.messages.map((e) => + e ? Message.toJSON(e) : undefined + ); + } else { + obj.messages = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ChatRequest { + const message = { ...baseChatRequest } as ChatRequest; + message.model = object.model ?? 
""; + message.generationOptions = + object.generationOptions !== undefined && + object.generationOptions !== null + ? GenerationOptions.fromPartial(object.generationOptions) + : undefined; + message.instructionText = object.instructionText ?? undefined; + message.messages = + object.messages?.map((e) => Message.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ChatRequest.$type, ChatRequest); + +const baseChatResponse: object = { + $type: "yandex.cloud.ai.llm.v1alpha.ChatResponse", + numTokens: 0, +}; + +export const ChatResponse = { + $type: "yandex.cloud.ai.llm.v1alpha.ChatResponse" as const, + + encode( + message: ChatResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.message !== undefined) { + Message.encode(message.message, writer.uint32(10).fork()).ldelim(); + } + if (message.numTokens !== 0) { + writer.uint32(16).int64(message.numTokens); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ChatResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseChatResponse } as ChatResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.message = Message.decode(reader, reader.uint32()); + break; + case 2: + message.numTokens = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChatResponse { + const message = { ...baseChatResponse } as ChatResponse; + message.message = + object.message !== undefined && object.message !== null + ? Message.fromJSON(object.message) + : undefined; + message.numTokens = + object.numTokens !== undefined && object.numTokens !== null + ? 
Number(object.numTokens) + : 0; + return message; + }, + + toJSON(message: ChatResponse): unknown { + const obj: any = {}; + message.message !== undefined && + (obj.message = message.message + ? Message.toJSON(message.message) + : undefined); + message.numTokens !== undefined && + (obj.numTokens = Math.round(message.numTokens)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ChatResponse { + const message = { ...baseChatResponse } as ChatResponse; + message.message = + object.message !== undefined && object.message !== null + ? Message.fromPartial(object.message) + : undefined; + message.numTokens = object.numTokens ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ChatResponse.$type, ChatResponse); + +const baseTokenizeRequest: object = { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeRequest", + model: "", + text: "", +}; + +export const TokenizeRequest = { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeRequest" as const, + + encode( + message: TokenizeRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TokenizeRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTokenizeRequest } as TokenizeRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; + case 2: + message.text = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TokenizeRequest { + const message = { ...baseTokenizeRequest } as TokenizeRequest; + message.model = + object.model !== undefined && object.model !== null + ? 
String(object.model) + : ""; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + return message; + }, + + toJSON(message: TokenizeRequest): unknown { + const obj: any = {}; + message.model !== undefined && (obj.model = message.model); + message.text !== undefined && (obj.text = message.text); + return obj; + }, + + fromPartial, I>>( + object: I + ): TokenizeRequest { + const message = { ...baseTokenizeRequest } as TokenizeRequest; + message.model = object.model ?? ""; + message.text = object.text ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(TokenizeRequest.$type, TokenizeRequest); + +const baseTokenizeResponse: object = { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeResponse", +}; + +export const TokenizeResponse = { + $type: "yandex.cloud.ai.llm.v1alpha.TokenizeResponse" as const, + + encode( + message: TokenizeResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.tokens) { + Token.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TokenizeResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTokenizeResponse } as TokenizeResponse; + message.tokens = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tokens.push(Token.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TokenizeResponse { + const message = { ...baseTokenizeResponse } as TokenizeResponse; + message.tokens = (object.tokens ?? []).map((e: any) => Token.fromJSON(e)); + return message; + }, + + toJSON(message: TokenizeResponse): unknown { + const obj: any = {}; + if (message.tokens) { + obj.tokens = message.tokens.map((e) => (e ? 
Token.toJSON(e) : undefined)); + } else { + obj.tokens = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): TokenizeResponse { + const message = { ...baseTokenizeResponse } as TokenizeResponse; + message.tokens = object.tokens?.map((e) => Token.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(TokenizeResponse.$type, TokenizeResponse); + +const baseEmbeddingRequest: object = { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingRequest", + embeddingType: 0, + model: "", + text: "", +}; + +export const EmbeddingRequest = { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingRequest" as const, + + encode( + message: EmbeddingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.embeddingType !== 0) { + writer.uint32(8).int32(message.embeddingType); + } + if (message.model !== "") { + writer.uint32(18).string(message.model); + } + if (message.text !== "") { + writer.uint32(26).string(message.text); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EmbeddingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEmbeddingRequest } as EmbeddingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.embeddingType = reader.int32() as any; + break; + case 2: + message.model = reader.string(); + break; + case 3: + message.text = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EmbeddingRequest { + const message = { ...baseEmbeddingRequest } as EmbeddingRequest; + message.embeddingType = + object.embeddingType !== undefined && object.embeddingType !== null + ? embeddingRequest_EmbeddingTypeFromJSON(object.embeddingType) + : 0; + message.model = + object.model !== undefined && object.model !== null + ? 
String(object.model) + : ""; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + return message; + }, + + toJSON(message: EmbeddingRequest): unknown { + const obj: any = {}; + message.embeddingType !== undefined && + (obj.embeddingType = embeddingRequest_EmbeddingTypeToJSON( + message.embeddingType + )); + message.model !== undefined && (obj.model = message.model); + message.text !== undefined && (obj.text = message.text); + return obj; + }, + + fromPartial, I>>( + object: I + ): EmbeddingRequest { + const message = { ...baseEmbeddingRequest } as EmbeddingRequest; + message.embeddingType = object.embeddingType ?? 0; + message.model = object.model ?? ""; + message.text = object.text ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EmbeddingRequest.$type, EmbeddingRequest); + +const baseEmbeddingResponse: object = { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingResponse", + embedding: 0, + numTokens: 0, +}; + +export const EmbeddingResponse = { + $type: "yandex.cloud.ai.llm.v1alpha.EmbeddingResponse" as const, + + encode( + message: EmbeddingResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + writer.uint32(10).fork(); + for (const v of message.embedding) { + writer.double(v); + } + writer.ldelim(); + if (message.numTokens !== 0) { + writer.uint32(16).int64(message.numTokens); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EmbeddingResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEmbeddingResponse } as EmbeddingResponse; + message.embedding = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.embedding.push(reader.double()); + } + } else { + message.embedding.push(reader.double()); + } + break; + case 2: + message.numTokens = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EmbeddingResponse { + const message = { ...baseEmbeddingResponse } as EmbeddingResponse; + message.embedding = (object.embedding ?? []).map((e: any) => Number(e)); + message.numTokens = + object.numTokens !== undefined && object.numTokens !== null + ? Number(object.numTokens) + : 0; + return message; + }, + + toJSON(message: EmbeddingResponse): unknown { + const obj: any = {}; + if (message.embedding) { + obj.embedding = message.embedding.map((e) => e); + } else { + obj.embedding = []; + } + message.numTokens !== undefined && + (obj.numTokens = Math.round(message.numTokens)); + return obj; + }, + + fromPartial, I>>( + object: I + ): EmbeddingResponse { + const message = { ...baseEmbeddingResponse } as EmbeddingResponse; + message.embedding = object.embedding?.map((e) => e) || []; + message.numTokens = object.numTokens ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(EmbeddingResponse.$type, EmbeddingResponse); + +/** Service for text generation and conversation. */ +export const TextGenerationServiceService = { + /** RPC method for instructing the model to generate text. 
*/ + instruct: { + path: "/yandex.cloud.ai.llm.v1alpha.TextGenerationService/Instruct", + requestStream: false, + responseStream: true, + requestSerialize: (value: InstructRequest) => + Buffer.from(InstructRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => InstructRequest.decode(value), + responseSerialize: (value: InstructResponse) => + Buffer.from(InstructResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => InstructResponse.decode(value), + }, + /** RPC method for engaging in a chat conversation with the model. */ + chat: { + path: "/yandex.cloud.ai.llm.v1alpha.TextGenerationService/Chat", + requestStream: false, + responseStream: true, + requestSerialize: (value: ChatRequest) => + Buffer.from(ChatRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ChatRequest.decode(value), + responseSerialize: (value: ChatResponse) => + Buffer.from(ChatResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ChatResponse.decode(value), + }, +} as const; + +export interface TextGenerationServiceServer + extends UntypedServiceImplementation { + /** RPC method for instructing the model to generate text. */ + instruct: handleServerStreamingCall; + /** RPC method for engaging in a chat conversation with the model. */ + chat: handleServerStreamingCall; +} + +export interface TextGenerationServiceClient extends Client { + /** RPC method for instructing the model to generate text. */ + instruct( + request: InstructRequest, + options?: Partial + ): ClientReadableStream; + instruct( + request: InstructRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; + /** RPC method for engaging in a chat conversation with the model. 
*/ + chat( + request: ChatRequest, + options?: Partial + ): ClientReadableStream; + chat( + request: ChatRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; +} + +export const TextGenerationServiceClient = makeGenericClientConstructor( + TextGenerationServiceService, + "yandex.cloud.ai.llm.v1alpha.TextGenerationService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TextGenerationServiceClient; + service: typeof TextGenerationServiceService; +}; + +/** Service for tokenizing input text. */ +export const TokenizerServiceService = { + /** RPC method for tokenizing input text. */ + tokenize: { + path: "/yandex.cloud.ai.llm.v1alpha.TokenizerService/Tokenize", + requestStream: false, + responseStream: false, + requestSerialize: (value: TokenizeRequest) => + Buffer.from(TokenizeRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => TokenizeRequest.decode(value), + responseSerialize: (value: TokenizeResponse) => + Buffer.from(TokenizeResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => TokenizeResponse.decode(value), + }, +} as const; + +export interface TokenizerServiceServer extends UntypedServiceImplementation { + /** RPC method for tokenizing input text. */ + tokenize: handleUnaryCall; +} + +export interface TokenizerServiceClient extends Client { + /** RPC method for tokenizing input text. 
*/ + tokenize( + request: TokenizeRequest, + callback: (error: ServiceError | null, response: TokenizeResponse) => void + ): ClientUnaryCall; + tokenize( + request: TokenizeRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: TokenizeResponse) => void + ): ClientUnaryCall; + tokenize( + request: TokenizeRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: TokenizeResponse) => void + ): ClientUnaryCall; +} + +export const TokenizerServiceClient = makeGenericClientConstructor( + TokenizerServiceService, + "yandex.cloud.ai.llm.v1alpha.TokenizerService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TokenizerServiceClient; + service: typeof TokenizerServiceService; +}; + +/** Service for obtaining embeddings for text data. */ +export const EmbeddingsServiceService = { + /** RPC method to obtain embeddings for input text data. */ + embedding: { + path: "/yandex.cloud.ai.llm.v1alpha.EmbeddingsService/Embedding", + requestStream: false, + responseStream: false, + requestSerialize: (value: EmbeddingRequest) => + Buffer.from(EmbeddingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => EmbeddingRequest.decode(value), + responseSerialize: (value: EmbeddingResponse) => + Buffer.from(EmbeddingResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => EmbeddingResponse.decode(value), + }, +} as const; + +export interface EmbeddingsServiceServer extends UntypedServiceImplementation { + /** RPC method to obtain embeddings for input text data. */ + embedding: handleUnaryCall; +} + +export interface EmbeddingsServiceClient extends Client { + /** RPC method to obtain embeddings for input text data. 
*/ + embedding( + request: EmbeddingRequest, + callback: (error: ServiceError | null, response: EmbeddingResponse) => void + ): ClientUnaryCall; + embedding( + request: EmbeddingRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: EmbeddingResponse) => void + ): ClientUnaryCall; + embedding( + request: EmbeddingRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: EmbeddingResponse) => void + ): ClientUnaryCall; +} + +export const EmbeddingsServiceClient = makeGenericClientConstructor( + EmbeddingsServiceService, + "yandex.cloud.ai.llm.v1alpha.EmbeddingsService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): EmbeddingsServiceClient; + service: typeof EmbeddingsServiceService; +}; + +/** Service for asynchronous text generation. */ +export const TextGenerationAsyncServiceService = { + /** RPC method for instructing the model to generate text. */ + instruct: { + path: "/yandex.cloud.ai.llm.v1alpha.TextGenerationAsyncService/Instruct", + requestStream: false, + responseStream: false, + requestSerialize: (value: InstructRequest) => + Buffer.from(InstructRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => InstructRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface TextGenerationAsyncServiceServer + extends UntypedServiceImplementation { + /** RPC method for instructing the model to generate text. */ + instruct: handleUnaryCall; +} + +export interface TextGenerationAsyncServiceClient extends Client { + /** RPC method for instructing the model to generate text. 
*/ + instruct( + request: InstructRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + instruct( + request: InstructRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + instruct( + request: InstructRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const TextGenerationAsyncServiceClient = makeGenericClientConstructor( + TextGenerationAsyncServiceService, + "yandex.cloud.ai.llm.v1alpha.TextGenerationAsyncService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TextGenerationAsyncServiceClient; + service: typeof TextGenerationAsyncServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/ocr/v1/ocr.ts b/src/generated/yandex/cloud/ai/ocr/v1/ocr.ts new file mode 100644 index 00000000..65d2c509 --- /dev/null +++ b/src/generated/yandex/cloud/ai/ocr/v1/ocr.ts @@ -0,0 +1,774 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.ai.ocr.v1"; + +export interface Polygon { + $type: "yandex.cloud.ai.ocr.v1.Polygon"; + /** The bounding polygon vertices. */ + vertices: Vertex[]; +} + +export interface Vertex { + $type: "yandex.cloud.ai.ocr.v1.Vertex"; + /** X coordinate in pixels. */ + x: number; + /** Y coordinate in pixels. */ + y: number; +} + +export interface TextAnnotation { + $type: "yandex.cloud.ai.ocr.v1.TextAnnotation"; + /** Page width in pixels. */ + width: number; + /** Page height in pixels. */ + height: number; + /** Recognized text blocks in this page. */ + blocks: Block[]; + /** Recognized entities. */ + entities: Entity[]; +} + +export interface Entity { + $type: "yandex.cloud.ai.ocr.v1.Entity"; + /** Entity name. */ + name: string; + /** Recognized entity text. */ + text: string; +} + +export interface Block { + $type: "yandex.cloud.ai.ocr.v1.Block"; + /** Area on the page where the text block is located. */ + boundingBox?: Polygon; + /** Recognized lines in this block. 
*/ + lines: Line[]; + /** A list of detected languages */ + languages: Block_DetectedLanguage[]; +} + +export interface Block_DetectedLanguage { + $type: "yandex.cloud.ai.ocr.v1.Block.DetectedLanguage"; + /** Detected language code. */ + languageCode: string; +} + +export interface Line { + $type: "yandex.cloud.ai.ocr.v1.Line"; + /** Area on the page where the line is located. */ + boundingBox?: Polygon; + /** Recognized text. */ + text: string; + /** Recognized words */ + words: Word[]; +} + +export interface Word { + $type: "yandex.cloud.ai.ocr.v1.Word"; + /** Area on the page where the word is located. */ + boundingBox?: Polygon; + /** Recognized word value. */ + text: string; + /** ID of the recognized word in entities array. */ + entityIndex: number; +} + +const basePolygon: object = { $type: "yandex.cloud.ai.ocr.v1.Polygon" }; + +export const Polygon = { + $type: "yandex.cloud.ai.ocr.v1.Polygon" as const, + + encode( + message: Polygon, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.vertices) { + Vertex.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Polygon { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePolygon } as Polygon; + message.vertices = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vertices.push(Vertex.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Polygon { + const message = { ...basePolygon } as Polygon; + message.vertices = (object.vertices ?? 
[]).map((e: any) => + Vertex.fromJSON(e) + ); + return message; + }, + + toJSON(message: Polygon): unknown { + const obj: any = {}; + if (message.vertices) { + obj.vertices = message.vertices.map((e) => + e ? Vertex.toJSON(e) : undefined + ); + } else { + obj.vertices = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Polygon { + const message = { ...basePolygon } as Polygon; + message.vertices = object.vertices?.map((e) => Vertex.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Polygon.$type, Polygon); + +const baseVertex: object = { + $type: "yandex.cloud.ai.ocr.v1.Vertex", + x: 0, + y: 0, +}; + +export const Vertex = { + $type: "yandex.cloud.ai.ocr.v1.Vertex" as const, + + encode( + message: Vertex, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.x !== 0) { + writer.uint32(8).int64(message.x); + } + if (message.y !== 0) { + writer.uint32(16).int64(message.y); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Vertex { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseVertex } as Vertex; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.x = longToNumber(reader.int64() as Long); + break; + case 2: + message.y = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Vertex { + const message = { ...baseVertex } as Vertex; + message.x = + object.x !== undefined && object.x !== null ? Number(object.x) : 0; + message.y = + object.y !== undefined && object.y !== null ? 
Number(object.y) : 0; + return message; + }, + + toJSON(message: Vertex): unknown { + const obj: any = {}; + message.x !== undefined && (obj.x = Math.round(message.x)); + message.y !== undefined && (obj.y = Math.round(message.y)); + return obj; + }, + + fromPartial, I>>(object: I): Vertex { + const message = { ...baseVertex } as Vertex; + message.x = object.x ?? 0; + message.y = object.y ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Vertex.$type, Vertex); + +const baseTextAnnotation: object = { + $type: "yandex.cloud.ai.ocr.v1.TextAnnotation", + width: 0, + height: 0, +}; + +export const TextAnnotation = { + $type: "yandex.cloud.ai.ocr.v1.TextAnnotation" as const, + + encode( + message: TextAnnotation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.width !== 0) { + writer.uint32(8).int64(message.width); + } + if (message.height !== 0) { + writer.uint32(16).int64(message.height); + } + for (const v of message.blocks) { + Block.encode(v!, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.entities) { + Entity.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TextAnnotation { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseTextAnnotation } as TextAnnotation; + message.blocks = []; + message.entities = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.width = longToNumber(reader.int64() as Long); + break; + case 2: + message.height = longToNumber(reader.int64() as Long); + break; + case 3: + message.blocks.push(Block.decode(reader, reader.uint32())); + break; + case 4: + message.entities.push(Entity.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextAnnotation { + const message = { ...baseTextAnnotation } as TextAnnotation; + message.width = + object.width !== undefined && object.width !== null + ? Number(object.width) + : 0; + message.height = + object.height !== undefined && object.height !== null + ? Number(object.height) + : 0; + message.blocks = (object.blocks ?? []).map((e: any) => Block.fromJSON(e)); + message.entities = (object.entities ?? []).map((e: any) => + Entity.fromJSON(e) + ); + return message; + }, + + toJSON(message: TextAnnotation): unknown { + const obj: any = {}; + message.width !== undefined && (obj.width = Math.round(message.width)); + message.height !== undefined && (obj.height = Math.round(message.height)); + if (message.blocks) { + obj.blocks = message.blocks.map((e) => (e ? Block.toJSON(e) : undefined)); + } else { + obj.blocks = []; + } + if (message.entities) { + obj.entities = message.entities.map((e) => + e ? Entity.toJSON(e) : undefined + ); + } else { + obj.entities = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): TextAnnotation { + const message = { ...baseTextAnnotation } as TextAnnotation; + message.width = object.width ?? 0; + message.height = object.height ?? 
0; + message.blocks = object.blocks?.map((e) => Block.fromPartial(e)) || []; + message.entities = object.entities?.map((e) => Entity.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(TextAnnotation.$type, TextAnnotation); + +const baseEntity: object = { + $type: "yandex.cloud.ai.ocr.v1.Entity", + name: "", + text: "", +}; + +export const Entity = { + $type: "yandex.cloud.ai.ocr.v1.Entity" as const, + + encode( + message: Entity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Entity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEntity } as Entity; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.text = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Entity { + const message = { ...baseEntity } as Entity; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + return message; + }, + + toJSON(message: Entity): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.text !== undefined && (obj.text = message.text); + return obj; + }, + + fromPartial, I>>(object: I): Entity { + const message = { ...baseEntity } as Entity; + message.name = object.name ?? ""; + message.text = object.text ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Entity.$type, Entity); + +const baseBlock: object = { $type: "yandex.cloud.ai.ocr.v1.Block" }; + +export const Block = { + $type: "yandex.cloud.ai.ocr.v1.Block" as const, + + encode(message: Block, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.boundingBox !== undefined) { + Polygon.encode(message.boundingBox, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.lines) { + Line.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.languages) { + Block_DetectedLanguage.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Block { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBlock } as Block; + message.lines = []; + message.languages = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.boundingBox = Polygon.decode(reader, reader.uint32()); + break; + case 2: + message.lines.push(Line.decode(reader, reader.uint32())); + break; + case 3: + message.languages.push( + Block_DetectedLanguage.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Block { + const message = { ...baseBlock } as Block; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromJSON(object.boundingBox) + : undefined; + message.lines = (object.lines ?? []).map((e: any) => Line.fromJSON(e)); + message.languages = (object.languages ?? []).map((e: any) => + Block_DetectedLanguage.fromJSON(e) + ); + return message; + }, + + toJSON(message: Block): unknown { + const obj: any = {}; + message.boundingBox !== undefined && + (obj.boundingBox = message.boundingBox + ? 
Polygon.toJSON(message.boundingBox) + : undefined); + if (message.lines) { + obj.lines = message.lines.map((e) => (e ? Line.toJSON(e) : undefined)); + } else { + obj.lines = []; + } + if (message.languages) { + obj.languages = message.languages.map((e) => + e ? Block_DetectedLanguage.toJSON(e) : undefined + ); + } else { + obj.languages = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Block { + const message = { ...baseBlock } as Block; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromPartial(object.boundingBox) + : undefined; + message.lines = object.lines?.map((e) => Line.fromPartial(e)) || []; + message.languages = + object.languages?.map((e) => Block_DetectedLanguage.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Block.$type, Block); + +const baseBlock_DetectedLanguage: object = { + $type: "yandex.cloud.ai.ocr.v1.Block.DetectedLanguage", + languageCode: "", +}; + +export const Block_DetectedLanguage = { + $type: "yandex.cloud.ai.ocr.v1.Block.DetectedLanguage" as const, + + encode( + message: Block_DetectedLanguage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.languageCode !== "") { + writer.uint32(10).string(message.languageCode); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Block_DetectedLanguage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBlock_DetectedLanguage } as Block_DetectedLanguage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.languageCode = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Block_DetectedLanguage { + const message = { ...baseBlock_DetectedLanguage } as Block_DetectedLanguage; + message.languageCode = + object.languageCode !== undefined && object.languageCode !== null + ? String(object.languageCode) + : ""; + return message; + }, + + toJSON(message: Block_DetectedLanguage): unknown { + const obj: any = {}; + message.languageCode !== undefined && + (obj.languageCode = message.languageCode); + return obj; + }, + + fromPartial, I>>( + object: I + ): Block_DetectedLanguage { + const message = { ...baseBlock_DetectedLanguage } as Block_DetectedLanguage; + message.languageCode = object.languageCode ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Block_DetectedLanguage.$type, Block_DetectedLanguage); + +const baseLine: object = { $type: "yandex.cloud.ai.ocr.v1.Line", text: "" }; + +export const Line = { + $type: "yandex.cloud.ai.ocr.v1.Line" as const, + + encode(message: Line, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.boundingBox !== undefined) { + Polygon.encode(message.boundingBox, writer.uint32(10).fork()).ldelim(); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + for (const v of message.words) { + Word.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Line { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLine } as Line; + message.words = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.boundingBox = Polygon.decode(reader, reader.uint32()); + break; + case 2: + message.text = reader.string(); + break; + case 3: + message.words.push(Word.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Line { + const message = { ...baseLine } as Line; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromJSON(object.boundingBox) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.words = (object.words ?? []).map((e: any) => Word.fromJSON(e)); + return message; + }, + + toJSON(message: Line): unknown { + const obj: any = {}; + message.boundingBox !== undefined && + (obj.boundingBox = message.boundingBox + ? Polygon.toJSON(message.boundingBox) + : undefined); + message.text !== undefined && (obj.text = message.text); + if (message.words) { + obj.words = message.words.map((e) => (e ? Word.toJSON(e) : undefined)); + } else { + obj.words = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Line { + const message = { ...baseLine } as Line; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromPartial(object.boundingBox) + : undefined; + message.text = object.text ?? 
""; + message.words = object.words?.map((e) => Word.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Line.$type, Line); + +const baseWord: object = { + $type: "yandex.cloud.ai.ocr.v1.Word", + text: "", + entityIndex: 0, +}; + +export const Word = { + $type: "yandex.cloud.ai.ocr.v1.Word" as const, + + encode(message: Word, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.boundingBox !== undefined) { + Polygon.encode(message.boundingBox, writer.uint32(10).fork()).ldelim(); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + if (message.entityIndex !== 0) { + writer.uint32(24).int64(message.entityIndex); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Word { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseWord } as Word; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.boundingBox = Polygon.decode(reader, reader.uint32()); + break; + case 2: + message.text = reader.string(); + break; + case 3: + message.entityIndex = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Word { + const message = { ...baseWord } as Word; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromJSON(object.boundingBox) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.entityIndex = + object.entityIndex !== undefined && object.entityIndex !== null + ? Number(object.entityIndex) + : 0; + return message; + }, + + toJSON(message: Word): unknown { + const obj: any = {}; + message.boundingBox !== undefined && + (obj.boundingBox = message.boundingBox + ? 
Polygon.toJSON(message.boundingBox) + : undefined); + message.text !== undefined && (obj.text = message.text); + message.entityIndex !== undefined && + (obj.entityIndex = Math.round(message.entityIndex)); + return obj; + }, + + fromPartial, I>>(object: I): Word { + const message = { ...baseWord } as Word; + message.boundingBox = + object.boundingBox !== undefined && object.boundingBox !== null + ? Polygon.fromPartial(object.boundingBox) + : undefined; + message.text = object.text ?? ""; + message.entityIndex = object.entityIndex ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Word.$type, Word); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/ocr/v1/ocr_service.ts b/src/generated/yandex/cloud/ai/ocr/v1/ocr_service.ts new file mode 100644 index 00000000..c168fc39 --- /dev/null +++ b/src/generated/yandex/cloud/ai/ocr/v1/ocr_service.ts @@ -0,0 +1,534 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleServerStreamingCall, + Client, + CallOptions, + ClientReadableStream, + Metadata, + handleUnaryCall, + ClientUnaryCall, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { TextAnnotation } from "../../../../../yandex/cloud/ai/ocr/v1/ocr"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.ai.ocr.v1"; + +export interface RecognizeTextRequest { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextRequest"; + /** Bytes with data */ + content: Buffer | undefined; + /** + * Specifications of the ([MIME type](https://en.wikipedia.org/wiki/Media_type)). Each specification contains the file to analyze and features to use for analysis. Restrictions: + * * Supported file formats: `JPEG`, `PNG`, `WEBP`, `PDF`. + * * Maximum file size: 20 MB. + * * Image size should not exceed 20M pixels (length x width). + * * The number of pages in a PDF file should not exceed 200 (each page counts as 1 request). + */ + mimeType: string; + /** + * List of the languages to recognize text. 
+ * Specified in [ISO 639-1](https://en.wikipedia.org/wiki/ISO_639-1) format (for example, `ru`). + */ + languageCodes: string[]; + /** + * Model to use for text detection. The maximum string length is 50 characters. Possible values: + * * `page` (default): this model is suitable for detecting multiple text entries in an image. + * * `passport`: passport, the main double-page spread. + * * `driver-license-front`: driver's license, the front side. + * * `driver-license-back`: driver's license, the reverse side. + * * `vehicle-registration-front`: front side of the vehicle registration certificate. + * * `vehicle-registration-back`: back side of the vehicle registration certificate. + */ + model: string; +} + +export interface RecognizeTextResponse { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextResponse"; + /** Recognized text blocks in this page or text from entities. */ + textAnnotation?: TextAnnotation; + /** Page number in PDF file. */ + page: number; +} + +export interface GetRecognitionRequest { + $type: "yandex.cloud.ai.ocr.v1.GetRecognitionRequest"; + /** Operation ID of async recognition request. 
*/ + operationId: string; +} + +const baseRecognizeTextRequest: object = { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextRequest", + mimeType: "", + languageCodes: "", + model: "", +}; + +export const RecognizeTextRequest = { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextRequest" as const, + + encode( + message: RecognizeTextRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.content !== undefined) { + writer.uint32(10).bytes(message.content); + } + if (message.mimeType !== "") { + writer.uint32(18).string(message.mimeType); + } + for (const v of message.languageCodes) { + writer.uint32(26).string(v!); + } + if (message.model !== "") { + writer.uint32(34).string(message.model); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognizeTextRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRecognizeTextRequest } as RecognizeTextRequest; + message.languageCodes = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.content = reader.bytes() as Buffer; + break; + case 2: + message.mimeType = reader.string(); + break; + case 3: + message.languageCodes.push(reader.string()); + break; + case 4: + message.model = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognizeTextRequest { + const message = { ...baseRecognizeTextRequest } as RecognizeTextRequest; + message.content = + object.content !== undefined && object.content !== null + ? Buffer.from(bytesFromBase64(object.content)) + : undefined; + message.mimeType = + object.mimeType !== undefined && object.mimeType !== null + ? String(object.mimeType) + : ""; + message.languageCodes = (object.languageCodes ?? 
[]).map((e: any) => + String(e) + ); + message.model = + object.model !== undefined && object.model !== null + ? String(object.model) + : ""; + return message; + }, + + toJSON(message: RecognizeTextRequest): unknown { + const obj: any = {}; + message.content !== undefined && + (obj.content = + message.content !== undefined + ? base64FromBytes(message.content) + : undefined); + message.mimeType !== undefined && (obj.mimeType = message.mimeType); + if (message.languageCodes) { + obj.languageCodes = message.languageCodes.map((e) => e); + } else { + obj.languageCodes = []; + } + message.model !== undefined && (obj.model = message.model); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognizeTextRequest { + const message = { ...baseRecognizeTextRequest } as RecognizeTextRequest; + message.content = object.content ?? undefined; + message.mimeType = object.mimeType ?? ""; + message.languageCodes = object.languageCodes?.map((e) => e) || []; + message.model = object.model ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RecognizeTextRequest.$type, RecognizeTextRequest); + +const baseRecognizeTextResponse: object = { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextResponse", + page: 0, +}; + +export const RecognizeTextResponse = { + $type: "yandex.cloud.ai.ocr.v1.RecognizeTextResponse" as const, + + encode( + message: RecognizeTextResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.textAnnotation !== undefined) { + TextAnnotation.encode( + message.textAnnotation, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.page !== 0) { + writer.uint32(16).int64(message.page); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognizeTextResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRecognizeTextResponse } as RecognizeTextResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.textAnnotation = TextAnnotation.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.page = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognizeTextResponse { + const message = { ...baseRecognizeTextResponse } as RecognizeTextResponse; + message.textAnnotation = + object.textAnnotation !== undefined && object.textAnnotation !== null + ? TextAnnotation.fromJSON(object.textAnnotation) + : undefined; + message.page = + object.page !== undefined && object.page !== null + ? Number(object.page) + : 0; + return message; + }, + + toJSON(message: RecognizeTextResponse): unknown { + const obj: any = {}; + message.textAnnotation !== undefined && + (obj.textAnnotation = message.textAnnotation + ? TextAnnotation.toJSON(message.textAnnotation) + : undefined); + message.page !== undefined && (obj.page = Math.round(message.page)); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognizeTextResponse { + const message = { ...baseRecognizeTextResponse } as RecognizeTextResponse; + message.textAnnotation = + object.textAnnotation !== undefined && object.textAnnotation !== null + ? TextAnnotation.fromPartial(object.textAnnotation) + : undefined; + message.page = object.page ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(RecognizeTextResponse.$type, RecognizeTextResponse); + +const baseGetRecognitionRequest: object = { + $type: "yandex.cloud.ai.ocr.v1.GetRecognitionRequest", + operationId: "", +}; + +export const GetRecognitionRequest = { + $type: "yandex.cloud.ai.ocr.v1.GetRecognitionRequest" as const, + + encode( + message: GetRecognitionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.operationId !== "") { + writer.uint32(10).string(message.operationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetRecognitionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRecognitionRequest { + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + message.operationId = + object.operationId !== undefined && object.operationId !== null + ? String(object.operationId) + : ""; + return message; + }, + + toJSON(message: GetRecognitionRequest): unknown { + const obj: any = {}; + message.operationId !== undefined && + (obj.operationId = message.operationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRecognitionRequest { + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + message.operationId = object.operationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetRecognitionRequest.$type, GetRecognitionRequest); + +/** A set of methods for the Vision OCR service. 
*/ +export const TextRecognitionServiceService = { + /** To send the image for text recognition. */ + recognize: { + path: "/yandex.cloud.ai.ocr.v1.TextRecognitionService/Recognize", + requestStream: false, + responseStream: true, + requestSerialize: (value: RecognizeTextRequest) => + Buffer.from(RecognizeTextRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RecognizeTextRequest.decode(value), + responseSerialize: (value: RecognizeTextResponse) => + Buffer.from(RecognizeTextResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => RecognizeTextResponse.decode(value), + }, +} as const; + +export interface TextRecognitionServiceServer + extends UntypedServiceImplementation { + /** To send the image for text recognition. */ + recognize: handleServerStreamingCall< + RecognizeTextRequest, + RecognizeTextResponse + >; +} + +export interface TextRecognitionServiceClient extends Client { + /** To send the image for text recognition. */ + recognize( + request: RecognizeTextRequest, + options?: Partial + ): ClientReadableStream; + recognize( + request: RecognizeTextRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; +} + +export const TextRecognitionServiceClient = makeGenericClientConstructor( + TextRecognitionServiceService, + "yandex.cloud.ai.ocr.v1.TextRecognitionService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TextRecognitionServiceClient; + service: typeof TextRecognitionServiceService; +}; + +/** A set of methods for managing operations for asynchronous API requests. */ +export const TextRecognitionAsyncServiceService = { + /** To send the image for asynchronous text recognition. 
*/ + recognize: { + path: "/yandex.cloud.ai.ocr.v1.TextRecognitionAsyncService/Recognize", + requestStream: false, + responseStream: false, + requestSerialize: (value: RecognizeTextRequest) => + Buffer.from(RecognizeTextRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RecognizeTextRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** To get recognition results. */ + getRecognition: { + path: "/yandex.cloud.ai.ocr.v1.TextRecognitionAsyncService/GetRecognition", + requestStream: false, + responseStream: true, + requestSerialize: (value: GetRecognitionRequest) => + Buffer.from(GetRecognitionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetRecognitionRequest.decode(value), + responseSerialize: (value: RecognizeTextResponse) => + Buffer.from(RecognizeTextResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => RecognizeTextResponse.decode(value), + }, +} as const; + +export interface TextRecognitionAsyncServiceServer + extends UntypedServiceImplementation { + /** To send the image for asynchronous text recognition. */ + recognize: handleUnaryCall; + /** To get recognition results. */ + getRecognition: handleServerStreamingCall< + GetRecognitionRequest, + RecognizeTextResponse + >; +} + +export interface TextRecognitionAsyncServiceClient extends Client { + /** To send the image for asynchronous text recognition. 
*/ + recognize( + request: RecognizeTextRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + recognize( + request: RecognizeTextRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + recognize( + request: RecognizeTextRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** To get recognition results. */ + getRecognition( + request: GetRecognitionRequest, + options?: Partial + ): ClientReadableStream; + getRecognition( + request: GetRecognitionRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; +} + +export const TextRecognitionAsyncServiceClient = makeGenericClientConstructor( + TextRecognitionAsyncServiceService, + "yandex.cloud.ai.ocr.v1.TextRecognitionAsyncService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TextRecognitionAsyncServiceClient; + service: typeof TextRecognitionAsyncServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, 
"binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt.ts b/src/generated/yandex/cloud/ai/stt/v3/stt.ts index 40f4897a..ba57c3cf 100644 --- a/src/generated/yandex/cloud/ai/stt/v3/stt.ts +++ b/src/generated/yandex/cloud/ai/stt/v3/stt.ts @@ -52,14 +52,16 @@ export function codeTypeToJSON(object: CodeType): string { } } -/** options */ +/** Options */ export interface TextNormalizationOptions { $type: "speechkit.stt.v3.TextNormalizationOptions"; textNormalization: TextNormalizationOptions_TextNormalization; - /** Filter profanity (default: false). */ + /** Profanity filter (default: false). */ profanityFilter: boolean; /** Rewrite text in literature style (default: false). 
*/ literatureText: boolean; + /** Define phone formatting mode */ + phoneFormattingMode: TextNormalizationOptions_PhoneFormattingMode; } /** Normalization */ @@ -107,11 +109,48 @@ export function textNormalizationOptions_TextNormalizationToJSON( } } +export enum TextNormalizationOptions_PhoneFormattingMode { + PHONE_FORMATTING_MODE_UNSPECIFIED = 0, + /** PHONE_FORMATTING_MODE_DISABLED - Disable phone formatting */ + PHONE_FORMATTING_MODE_DISABLED = 1, + UNRECOGNIZED = -1, +} + +export function textNormalizationOptions_PhoneFormattingModeFromJSON( + object: any +): TextNormalizationOptions_PhoneFormattingMode { + switch (object) { + case 0: + case "PHONE_FORMATTING_MODE_UNSPECIFIED": + return TextNormalizationOptions_PhoneFormattingMode.PHONE_FORMATTING_MODE_UNSPECIFIED; + case 1: + case "PHONE_FORMATTING_MODE_DISABLED": + return TextNormalizationOptions_PhoneFormattingMode.PHONE_FORMATTING_MODE_DISABLED; + case -1: + case "UNRECOGNIZED": + default: + return TextNormalizationOptions_PhoneFormattingMode.UNRECOGNIZED; + } +} + +export function textNormalizationOptions_PhoneFormattingModeToJSON( + object: TextNormalizationOptions_PhoneFormattingMode +): string { + switch (object) { + case TextNormalizationOptions_PhoneFormattingMode.PHONE_FORMATTING_MODE_UNSPECIFIED: + return "PHONE_FORMATTING_MODE_UNSPECIFIED"; + case TextNormalizationOptions_PhoneFormattingMode.PHONE_FORMATTING_MODE_DISABLED: + return "PHONE_FORMATTING_MODE_DISABLED"; + default: + return "UNKNOWN"; + } +} + export interface DefaultEouClassifier { $type: "speechkit.stt.v3.DefaultEouClassifier"; /** EOU sensitivity. Currently two levels, faster with more error and more conservative (our default). */ type: DefaultEouClassifier_EouSensitivity; - /** Hint for max pause between words. Our EoU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc). */ + /** Hint for max pause between words. 
Our EOU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc). */ maxPauseBetweenWordsHintMs: number; } @@ -166,10 +205,79 @@ export interface EouClassifierOptions { $type: "speechkit.stt.v3.EouClassifierOptions"; /** EOU classifier provided by SpeechKit. Default. */ defaultClassifier?: DefaultEouClassifier | undefined; - /** EoU is enforced by external messages from user. */ + /** EOU is enforced by external messages from user. */ externalClassifier?: ExternalEouClassifier | undefined; } +export interface RecognitionClassifier { + $type: "speechkit.stt.v3.RecognitionClassifier"; + /** Classifier name */ + classifier: string; + /** Describes the types of responses to which the classification results will come */ + triggers: RecognitionClassifier_TriggerType[]; +} + +export enum RecognitionClassifier_TriggerType { + /** TRIGGER_TYPE_UNSPECIFIED - Do not use */ + TRIGGER_TYPE_UNSPECIFIED = 0, + /** ON_UTTERANCE - Apply classifier to utterance responses */ + ON_UTTERANCE = 1, + /** ON_FINAL - Apply classifier to final responses */ + ON_FINAL = 2, + UNRECOGNIZED = -1, +} + +export function recognitionClassifier_TriggerTypeFromJSON( + object: any +): RecognitionClassifier_TriggerType { + switch (object) { + case 0: + case "TRIGGER_TYPE_UNSPECIFIED": + return RecognitionClassifier_TriggerType.TRIGGER_TYPE_UNSPECIFIED; + case 1: + case "ON_UTTERANCE": + return RecognitionClassifier_TriggerType.ON_UTTERANCE; + case 2: + case "ON_FINAL": + return RecognitionClassifier_TriggerType.ON_FINAL; + case -1: + case "UNRECOGNIZED": + default: + return RecognitionClassifier_TriggerType.UNRECOGNIZED; + } +} + +export function recognitionClassifier_TriggerTypeToJSON( + object: RecognitionClassifier_TriggerType +): string { + switch (object) { + case RecognitionClassifier_TriggerType.TRIGGER_TYPE_UNSPECIFIED: + return "TRIGGER_TYPE_UNSPECIFIED"; + case RecognitionClassifier_TriggerType.ON_UTTERANCE: + return "ON_UTTERANCE"; 
+ case RecognitionClassifier_TriggerType.ON_FINAL: + return "ON_FINAL"; + default: + return "UNKNOWN"; + } +} + +export interface RecognitionClassifierOptions { + $type: "speechkit.stt.v3.RecognitionClassifierOptions"; + /** List of classifiers to use */ + classifiers: RecognitionClassifier[]; +} + +export interface SpeechAnalysisOptions { + $type: "speechkit.stt.v3.SpeechAnalysisOptions"; + /** Analyse speech for every speaker */ + enableSpeakerAnalysis: boolean; + /** Analyse conversation of two speakers */ + enableConversationAnalysis: boolean; + /** Quantile levels in range (0, 1) for descriptive statistics */ + descriptiveStatisticsQuantiles: number[]; +} + /** RAW Audio format spec (no container to infer type). Used in AudioFormat options. */ export interface RawAudio { $type: "speechkit.stt.v3.RawAudio"; @@ -285,15 +393,20 @@ export interface AudioFormatOptions { containerAudio?: ContainerAudio | undefined; } +/** Type of restriction for the list of languages expected in the incoming speech stream. */ export interface LanguageRestrictionOptions { $type: "speechkit.stt.v3.LanguageRestrictionOptions"; + /** Language restriction type */ restrictionType: LanguageRestrictionOptions_LanguageRestrictionType; + /** The list of language codes to restrict recognition in the case of an auto model */ languageCode: string[]; } export enum LanguageRestrictionOptions_LanguageRestrictionType { LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED = 0, + /** WHITELIST - The allowing list. The incoming audio can contain only the listed languages. */ WHITELIST = 1, + /** BLACKLIST - The forbidding list. The incoming audio cannot contain the listed languages. */ BLACKLIST = 2, UNRECOGNIZED = -1, } @@ -335,7 +448,10 @@ export function languageRestrictionOptions_LanguageRestrictionTypeToJSON( export interface RecognitionModelOptions { $type: "speechkit.stt.v3.RecognitionModelOptions"; - /** Reserved for future, do not use. 
*/ + /** + * Sets the recognition model for the cloud version of SpeechKit. Possible values: 'general', 'general:rc', 'general:deprecated'. + * The model is ignored for SpeechKit Hybrid. + */ model: string; /** Specified input audio. */ audioFormat?: AudioFormatOptions; @@ -349,7 +465,9 @@ export interface RecognitionModelOptions { export enum RecognitionModelOptions_AudioProcessingType { AUDIO_PROCESSING_TYPE_UNSPECIFIED = 0, + /** REAL_TIME - Process audio in mode optimized for real-time recognition, i.e. send partials and final responses as soon as possible */ REAL_TIME = 1, + /** FULL_DATA - Process audio after all data was received */ FULL_DATA = 2, UNRECOGNIZED = -1, } @@ -395,6 +513,10 @@ export interface StreamingOptions { recognitionModel?: RecognitionModelOptions; /** Configuration for end of utterance detection model. */ eouClassifier?: EouClassifierOptions; + /** Configuration for classifiers over speech recognition. */ + recognitionClassifier?: RecognitionClassifierOptions; + /** Configuration for speech analysis over speech recognition. */ + speechAnalysis?: SpeechAnalysisOptions; } /** Data chunk with audio. */ @@ -404,6 +526,7 @@ export interface AudioChunk { data: Buffer; } +/** Data chunk with silence. */ export interface SilenceChunk { $type: "speechkit.stt.v3.SilenceChunk"; /** Duration of silence chunk in ms. */ @@ -423,27 +546,50 @@ export interface Eou { */ export interface StreamingRequest { $type: "speechkit.stt.v3.StreamingRequest"; - /** Session options. should be first message from user */ + /** Session options. Should be the first message from user. */ sessionOptions?: StreamingOptions | undefined; /** Chunk with audio data. */ chunk?: AudioChunk | undefined; /** Chunk with silence. */ silenceChunk?: SilenceChunk | undefined; - /** Request to end current utterance. Works only with external EoU detector. */ + /** Request to end current utterance. Works only with external EOU detector. 
*/ eou?: Eou | undefined; } +export interface RecognizeFileRequest { + $type: "speechkit.stt.v3.RecognizeFileRequest"; + /** Bytes with data */ + content: Buffer | undefined; + /** S3 data url */ + uri: string | undefined; + /** Configuration for speech recognition model. */ + recognitionModel?: RecognitionModelOptions; + /** Configuration for classifiers over speech recognition. */ + recognitionClassifier?: RecognitionClassifierOptions; + /** Configuration for speech analysis over speech recognition. */ + speechAnalysis?: SpeechAnalysisOptions; +} + /** Recognized word. */ export interface Word { $type: "speechkit.stt.v3.Word"; /** Word text. */ text: string; - /** Estimation of word start time in ms */ + /** Estimation of word start time in ms. */ startTimeMs: number; - /** Estimation of word end time in ms */ + /** Estimation of word end time in ms. */ endTimeMs: number; } +/** Estimation of language and its probability. */ +export interface LanguageEstimation { + $type: "speechkit.stt.v3.LanguageEstimation"; + /** Language code in ISO 639-1 format. */ + languageCode: string; + /** Estimation of language probability. */ + probability: number; +} + /** Recognition of specific time frame. */ export interface Alternative { $type: "speechkit.stt.v3.Alternative"; @@ -455,14 +601,16 @@ export interface Alternative { startTimeMs: number; /** End of time frame. */ endTimeMs: number; - /** Hypothesis confidence. Currently is not used. */ + /** The hypothesis confidence. Currently is not used. */ confidence: number; + /** Distribution over possible languages. */ + languages: LanguageEstimation[]; } -/** Update information from */ +/** Update information for external End of Utterance. */ export interface EouUpdate { $type: "speechkit.stt.v3.EouUpdate"; - /** End of utterance estimated time. */ + /** EOU estimated time. 
*/ timeMs: number; } @@ -471,7 +619,7 @@ export interface AlternativeUpdate { $type: "speechkit.stt.v3.AlternativeUpdate"; /** List of hypothesis for timeframes. */ alternatives: Alternative[]; - /** Tag for distinguish audio channels. */ + /** @deprecated */ channelTag: string; } @@ -484,20 +632,20 @@ export interface AudioCursors { resetTimeMs: number; /** * How much audio was processed. This time includes trimming silences as well. This cursor is moved after server received enough data - * to update recognition results (includes silence as well). + * to update recognition results (includes silence as well). */ partialTimeMs: number; /** * Time of last final. This cursor is moved when server decides that recognition from start of audio until final_time_ms will not change anymore - * usually this even is followed by EOU detection (but this could change in future). + * usually this even is followed by EOU detection (but this could change in future). */ finalTimeMs: number; /** This is index of last final server send. Incremented after each new final. */ finalIndex: number; /** * Estimated time of EOU. Cursor is updated after each new EOU is sent. - * For external classifier this equals to received_data_ms at the moment EOU event arrives. - * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings. + * For external classifier this equals to received_data_ms at the moment EOU event arrives. + * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings. 
*/ eouTimeMs: number; } @@ -529,6 +677,231 @@ export interface SessionUuid { userRequestId: string; } +export interface PhraseHighlight { + $type: "speechkit.stt.v3.PhraseHighlight"; + /** Text transcription of the highlighted audio segment */ + text: string; + /** Start time of the highlighted audio segment */ + startTimeMs: number; + /** End time of the highlighted audio segment */ + endTimeMs: number; +} + +export interface RecognitionClassifierLabel { + $type: "speechkit.stt.v3.RecognitionClassifierLabel"; + /** The label of the class predicted by the classifier */ + label: string; + /** The prediction confidence */ + confidence: number; +} + +export interface RecognitionClassifierResult { + $type: "speechkit.stt.v3.RecognitionClassifierResult"; + /** Name of the triggered classifier */ + classifier: string; + /** List of highlights, i.e. parts of phrase that determine the result of the classification */ + highlights: PhraseHighlight[]; + /** Classifier predictions */ + labels: RecognitionClassifierLabel[]; +} + +export interface RecognitionClassifierUpdate { + $type: "speechkit.stt.v3.RecognitionClassifierUpdate"; + /** Response window type */ + windowType: RecognitionClassifierUpdate_WindowType; + /** Start time of the audio segment used for classification */ + startTimeMs: number; + /** End time of the audio segment used for classification */ + endTimeMs: number; + /** Result for dictionary-based classifier */ + classifierResult?: RecognitionClassifierResult; +} + +export enum RecognitionClassifierUpdate_WindowType { + /** WINDOW_TYPE_UNSPECIFIED - Never used */ + WINDOW_TYPE_UNSPECIFIED = 0, + /** LAST_UTTERANCE - The result of applying the classifier to the last utterance response */ + LAST_UTTERANCE = 1, + /** LAST_FINAL - The result of applying the classifier to the last final response */ + LAST_FINAL = 2, + UNRECOGNIZED = -1, +} + +export function recognitionClassifierUpdate_WindowTypeFromJSON( + object: any +): RecognitionClassifierUpdate_WindowType { 
+ switch (object) { + case 0: + case "WINDOW_TYPE_UNSPECIFIED": + return RecognitionClassifierUpdate_WindowType.WINDOW_TYPE_UNSPECIFIED; + case 1: + case "LAST_UTTERANCE": + return RecognitionClassifierUpdate_WindowType.LAST_UTTERANCE; + case 2: + case "LAST_FINAL": + return RecognitionClassifierUpdate_WindowType.LAST_FINAL; + case -1: + case "UNRECOGNIZED": + default: + return RecognitionClassifierUpdate_WindowType.UNRECOGNIZED; + } +} + +export function recognitionClassifierUpdate_WindowTypeToJSON( + object: RecognitionClassifierUpdate_WindowType +): string { + switch (object) { + case RecognitionClassifierUpdate_WindowType.WINDOW_TYPE_UNSPECIFIED: + return "WINDOW_TYPE_UNSPECIFIED"; + case RecognitionClassifierUpdate_WindowType.LAST_UTTERANCE: + return "LAST_UTTERANCE"; + case RecognitionClassifierUpdate_WindowType.LAST_FINAL: + return "LAST_FINAL"; + default: + return "UNKNOWN"; + } +} + +export interface DescriptiveStatistics { + $type: "speechkit.stt.v3.DescriptiveStatistics"; + /** Minimum observed value */ + min: number; + /** Maximum observed value */ + max: number; + /** Estimated mean of distribution */ + mean: number; + /** Estimated standard deviation of distribution */ + std: number; + /** List of evaluated quantiles */ + quantiles: DescriptiveStatistics_Quantile[]; +} + +export interface DescriptiveStatistics_Quantile { + $type: "speechkit.stt.v3.DescriptiveStatistics.Quantile"; + /** Quantile level in range (0, 1) */ + level: number; + /** Quantile value */ + value: number; +} + +export interface AudioSegmentBoundaries { + $type: "speechkit.stt.v3.AudioSegmentBoundaries"; + /** Audio segment start time */ + startTimeMs: number; + /** Audio segment end time */ + endTimeMs: number; +} + +export interface SpeakerAnalysis { + $type: "speechkit.stt.v3.SpeakerAnalysis"; + /** Speaker tag */ + speakerTag: string; + /** Response window type */ + windowType: SpeakerAnalysis_WindowType; + /** Audio segment boundaries */ + speechBoundaries?: 
AudioSegmentBoundaries; + /** Total speech duration */ + totalSpeechMs: number; + /** Speech ratio within audio segment */ + speechRatio: number; + /** Total silence duration */ + totalSilenceMs: number; + /** Silence ratio within audio segment */ + silenceRatio: number; + /** Number of words in recognized speech */ + wordsCount: number; + /** Number of letters in recognized speech */ + lettersCount: number; + /** Descriptive statistics for words per second distribution */ + wordsPerSecond?: DescriptiveStatistics; + /** Descriptive statistics for letters per second distribution */ + lettersPerSecond?: DescriptiveStatistics; + /** Descriptive statistics for words per utterance distribution */ + wordsPerUtterance?: DescriptiveStatistics; + /** Descriptive statistics for letters per utterance distribution */ + lettersPerUtterance?: DescriptiveStatistics; + /** Number of utterances */ + utteranceCount: number; + /** Descriptive statistics for utterance duration distribution */ + utteranceDurationEstimation?: DescriptiveStatistics; +} + +export enum SpeakerAnalysis_WindowType { + WINDOW_TYPE_UNSPECIFIED = 0, + /** TOTAL - Stats for all received audio */ + TOTAL = 1, + /** LAST_UTTERANCE - Stats for last utterance */ + LAST_UTTERANCE = 2, + UNRECOGNIZED = -1, +} + +export function speakerAnalysis_WindowTypeFromJSON( + object: any +): SpeakerAnalysis_WindowType { + switch (object) { + case 0: + case "WINDOW_TYPE_UNSPECIFIED": + return SpeakerAnalysis_WindowType.WINDOW_TYPE_UNSPECIFIED; + case 1: + case "TOTAL": + return SpeakerAnalysis_WindowType.TOTAL; + case 2: + case "LAST_UTTERANCE": + return SpeakerAnalysis_WindowType.LAST_UTTERANCE; + case -1: + case "UNRECOGNIZED": + default: + return SpeakerAnalysis_WindowType.UNRECOGNIZED; + } +} + +export function speakerAnalysis_WindowTypeToJSON( + object: SpeakerAnalysis_WindowType +): string { + switch (object) { + case SpeakerAnalysis_WindowType.WINDOW_TYPE_UNSPECIFIED: + return "WINDOW_TYPE_UNSPECIFIED"; + case 
SpeakerAnalysis_WindowType.TOTAL: + return "TOTAL"; + case SpeakerAnalysis_WindowType.LAST_UTTERANCE: + return "LAST_UTTERANCE"; + default: + return "UNKNOWN"; + } +} + +export interface ConversationAnalysis { + $type: "speechkit.stt.v3.ConversationAnalysis"; + /** Audio segment boundaries */ + conversationBoundaries?: AudioSegmentBoundaries; + /** Total simultaneous silence duration */ + totalSimultaneousSilenceDurationMs: number; + /** Simultaneous silence ratio within audio segment */ + totalSimultaneousSilenceRatio: number; + /** Descriptive statistics for simultaneous silence duration distribution */ + simultaneousSilenceDurationEstimation?: DescriptiveStatistics; + /** Total simultaneous speech duration */ + totalSimultaneousSpeechDurationMs: number; + /** Simultaneous speech ratio within audio segment */ + totalSimultaneousSpeechRatio: number; + /** Descriptive statistics for simultaneous speech duration distribution */ + simultaneousSpeechDurationEstimation?: DescriptiveStatistics; + /** Interrupts description for every speaker */ + speakerInterrupts: ConversationAnalysis_InterruptsEvaluation[]; +} + +export interface ConversationAnalysis_InterruptsEvaluation { + $type: "speechkit.stt.v3.ConversationAnalysis.InterruptsEvaluation"; + /** Speaker tag */ + speakerTag: string; + /** Number of interrupts made by the speaker */ + interruptsCount: number; + /** Total duration of all interrupts */ + interruptsDurationMs: number; + /** Boundaries for every interrupt */ + interrupts: AudioSegmentBoundaries[]; +} + /** * Responses from server. * Each response contains session uuid @@ -545,23 +918,31 @@ export interface StreamingResponse { responseWallTimeMs: number; /** * Partial results, server will send them regularly after enough audio data was received from user. This are current text estimation - * from final_time_ms to partial_time_ms. Could change after new data will arrive. + * from final_time_ms to partial_time_ms. Could change after new data will arrive. 
*/ partial?: AlternativeUpdate | undefined; /** Final results, the recognition is now fixed until final_time_ms. For now, final is sent only if the EOU event was triggered. This could be change in future releases. */ final?: AlternativeUpdate | undefined; /** * After EOU classifier, send the message with final, send the EouUpdate with time of EOU - * before eou_update we send final with the same time. there could be several finals before eou update. + * before eou_update we send final with the same time. there could be several finals before eou update. */ eouUpdate?: EouUpdate | undefined; /** * For each final, if normalization is enabled, sent the normalized text (or some other advanced post-processing). - * Final normalization will introduce additional latency. + * Final normalization will introduce additional latency. */ finalRefinement?: FinalRefinement | undefined; /** Status messages, send by server with fixed interval (keep-alive). */ statusCode?: StatusCode | undefined; + /** Result of the triggered classifier */ + classifierUpdate?: RecognitionClassifierUpdate | undefined; + /** Speech statistics for every speaker */ + speakerAnalysis?: SpeakerAnalysis | undefined; + /** Conversation statistics */ + conversationAnalysis?: ConversationAnalysis | undefined; + /** Tag for distinguish audio channels. 
*/ + channelTag: string; } const baseTextNormalizationOptions: object = { @@ -569,6 +950,7 @@ const baseTextNormalizationOptions: object = { textNormalization: 0, profanityFilter: false, literatureText: false, + phoneFormattingMode: 0, }; export const TextNormalizationOptions = { @@ -587,6 +969,9 @@ export const TextNormalizationOptions = { if (message.literatureText === true) { writer.uint32(24).bool(message.literatureText); } + if (message.phoneFormattingMode !== 0) { + writer.uint32(32).int32(message.phoneFormattingMode); + } return writer; }, @@ -611,6 +996,9 @@ export const TextNormalizationOptions = { case 3: message.literatureText = reader.bool(); break; + case 4: + message.phoneFormattingMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -638,6 +1026,13 @@ export const TextNormalizationOptions = { object.literatureText !== undefined && object.literatureText !== null ? Boolean(object.literatureText) : false; + message.phoneFormattingMode = + object.phoneFormattingMode !== undefined && + object.phoneFormattingMode !== null + ? textNormalizationOptions_PhoneFormattingModeFromJSON( + object.phoneFormattingMode + ) + : 0; return message; }, @@ -651,6 +1046,11 @@ export const TextNormalizationOptions = { (obj.profanityFilter = message.profanityFilter); message.literatureText !== undefined && (obj.literatureText = message.literatureText); + message.phoneFormattingMode !== undefined && + (obj.phoneFormattingMode = + textNormalizationOptions_PhoneFormattingModeToJSON( + message.phoneFormattingMode + )); return obj; }, @@ -663,6 +1063,7 @@ export const TextNormalizationOptions = { message.textNormalization = object.textNormalization ?? 0; message.profanityFilter = object.profanityFilter ?? false; message.literatureText = object.literatureText ?? false; + message.phoneFormattingMode = object.phoneFormattingMode ?? 
0; return message; }, }; @@ -913,47 +1314,53 @@ export const EouClassifierOptions = { messageTypeRegistry.set(EouClassifierOptions.$type, EouClassifierOptions); -const baseRawAudio: object = { - $type: "speechkit.stt.v3.RawAudio", - audioEncoding: 0, - sampleRateHertz: 0, - audioChannelCount: 0, +const baseRecognitionClassifier: object = { + $type: "speechkit.stt.v3.RecognitionClassifier", + classifier: "", + triggers: 0, }; -export const RawAudio = { - $type: "speechkit.stt.v3.RawAudio" as const, +export const RecognitionClassifier = { + $type: "speechkit.stt.v3.RecognitionClassifier" as const, encode( - message: RawAudio, + message: RecognitionClassifier, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.audioEncoding !== 0) { - writer.uint32(8).int32(message.audioEncoding); + if (message.classifier !== "") { + writer.uint32(10).string(message.classifier); } - if (message.sampleRateHertz !== 0) { - writer.uint32(16).int64(message.sampleRateHertz); - } - if (message.audioChannelCount !== 0) { - writer.uint32(24).int64(message.audioChannelCount); + writer.uint32(18).fork(); + for (const v of message.triggers) { + writer.int32(v); } + writer.ldelim(); return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): RawAudio { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionClassifier { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseRawAudio } as RawAudio; + const message = { ...baseRecognitionClassifier } as RecognitionClassifier; + message.triggers = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.audioEncoding = reader.int32() as any; + message.classifier = reader.string(); break; case 2: - message.sampleRateHertz = longToNumber(reader.int64() as Long); - break; - case 3: - message.audioChannelCount = longToNumber(reader.int64() as Long); + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.triggers.push(reader.int32() as any); + } + } else { + message.triggers.push(reader.int32() as any); + } break; default: reader.skipType(tag & 7); @@ -963,73 +1370,77 @@ export const RawAudio = { return message; }, - fromJSON(object: any): RawAudio { - const message = { ...baseRawAudio } as RawAudio; - message.audioEncoding = - object.audioEncoding !== undefined && object.audioEncoding !== null - ? rawAudio_AudioEncodingFromJSON(object.audioEncoding) - : 0; - message.sampleRateHertz = - object.sampleRateHertz !== undefined && object.sampleRateHertz !== null - ? Number(object.sampleRateHertz) - : 0; - message.audioChannelCount = - object.audioChannelCount !== undefined && - object.audioChannelCount !== null - ? Number(object.audioChannelCount) - : 0; + fromJSON(object: any): RecognitionClassifier { + const message = { ...baseRecognitionClassifier } as RecognitionClassifier; + message.classifier = + object.classifier !== undefined && object.classifier !== null + ? String(object.classifier) + : ""; + message.triggers = (object.triggers ?? 
[]).map((e: any) => + recognitionClassifier_TriggerTypeFromJSON(e) + ); return message; }, - toJSON(message: RawAudio): unknown { + toJSON(message: RecognitionClassifier): unknown { const obj: any = {}; - message.audioEncoding !== undefined && - (obj.audioEncoding = rawAudio_AudioEncodingToJSON(message.audioEncoding)); - message.sampleRateHertz !== undefined && - (obj.sampleRateHertz = Math.round(message.sampleRateHertz)); - message.audioChannelCount !== undefined && - (obj.audioChannelCount = Math.round(message.audioChannelCount)); + message.classifier !== undefined && (obj.classifier = message.classifier); + if (message.triggers) { + obj.triggers = message.triggers.map((e) => + recognitionClassifier_TriggerTypeToJSON(e) + ); + } else { + obj.triggers = []; + } return obj; }, - fromPartial, I>>(object: I): RawAudio { - const message = { ...baseRawAudio } as RawAudio; - message.audioEncoding = object.audioEncoding ?? 0; - message.sampleRateHertz = object.sampleRateHertz ?? 0; - message.audioChannelCount = object.audioChannelCount ?? 0; + fromPartial, I>>( + object: I + ): RecognitionClassifier { + const message = { ...baseRecognitionClassifier } as RecognitionClassifier; + message.classifier = object.classifier ?? 
""; + message.triggers = object.triggers?.map((e) => e) || []; return message; }, }; -messageTypeRegistry.set(RawAudio.$type, RawAudio); +messageTypeRegistry.set(RecognitionClassifier.$type, RecognitionClassifier); -const baseContainerAudio: object = { - $type: "speechkit.stt.v3.ContainerAudio", - containerAudioType: 0, +const baseRecognitionClassifierOptions: object = { + $type: "speechkit.stt.v3.RecognitionClassifierOptions", }; -export const ContainerAudio = { - $type: "speechkit.stt.v3.ContainerAudio" as const, +export const RecognitionClassifierOptions = { + $type: "speechkit.stt.v3.RecognitionClassifierOptions" as const, encode( - message: ContainerAudio, + message: RecognitionClassifierOptions, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.containerAudioType !== 0) { - writer.uint32(8).int32(message.containerAudioType); + for (const v of message.classifiers) { + RecognitionClassifier.encode(v!, writer.uint32(10).fork()).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ContainerAudio { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionClassifierOptions { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseContainerAudio } as ContainerAudio; + const message = { + ...baseRecognitionClassifierOptions, + } as RecognitionClassifierOptions; + message.classifiers = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.containerAudioType = reader.int32() as any; + message.classifiers.push( + RecognitionClassifier.decode(reader, reader.uint32()) + ); break; default: reader.skipType(tag & 7); @@ -1039,73 +1450,350 @@ export const ContainerAudio = { return message; }, - fromJSON(object: any): ContainerAudio { - const message = { ...baseContainerAudio } as ContainerAudio; - message.containerAudioType = - object.containerAudioType !== undefined && - object.containerAudioType !== null - ? containerAudio_ContainerAudioTypeFromJSON(object.containerAudioType) - : 0; + fromJSON(object: any): RecognitionClassifierOptions { + const message = { + ...baseRecognitionClassifierOptions, + } as RecognitionClassifierOptions; + message.classifiers = (object.classifiers ?? []).map((e: any) => + RecognitionClassifier.fromJSON(e) + ); return message; }, - toJSON(message: ContainerAudio): unknown { + toJSON(message: RecognitionClassifierOptions): unknown { const obj: any = {}; - message.containerAudioType !== undefined && - (obj.containerAudioType = containerAudio_ContainerAudioTypeToJSON( - message.containerAudioType - )); + if (message.classifiers) { + obj.classifiers = message.classifiers.map((e) => + e ? RecognitionClassifier.toJSON(e) : undefined + ); + } else { + obj.classifiers = []; + } return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): ContainerAudio { - const message = { ...baseContainerAudio } as ContainerAudio; - message.containerAudioType = object.containerAudioType ?? 
0; + ): RecognitionClassifierOptions { + const message = { + ...baseRecognitionClassifierOptions, + } as RecognitionClassifierOptions; + message.classifiers = + object.classifiers?.map((e) => RecognitionClassifier.fromPartial(e)) || + []; return message; }, }; -messageTypeRegistry.set(ContainerAudio.$type, ContainerAudio); +messageTypeRegistry.set( + RecognitionClassifierOptions.$type, + RecognitionClassifierOptions +); -const baseAudioFormatOptions: object = { - $type: "speechkit.stt.v3.AudioFormatOptions", +const baseSpeechAnalysisOptions: object = { + $type: "speechkit.stt.v3.SpeechAnalysisOptions", + enableSpeakerAnalysis: false, + enableConversationAnalysis: false, + descriptiveStatisticsQuantiles: 0, }; -export const AudioFormatOptions = { - $type: "speechkit.stt.v3.AudioFormatOptions" as const, +export const SpeechAnalysisOptions = { + $type: "speechkit.stt.v3.SpeechAnalysisOptions" as const, encode( - message: AudioFormatOptions, + message: SpeechAnalysisOptions, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.rawAudio !== undefined) { - RawAudio.encode(message.rawAudio, writer.uint32(10).fork()).ldelim(); + if (message.enableSpeakerAnalysis === true) { + writer.uint32(8).bool(message.enableSpeakerAnalysis); } - if (message.containerAudio !== undefined) { - ContainerAudio.encode( - message.containerAudio, - writer.uint32(18).fork() - ).ldelim(); + if (message.enableConversationAnalysis === true) { + writer.uint32(16).bool(message.enableConversationAnalysis); } + writer.uint32(26).fork(); + for (const v of message.descriptiveStatisticsQuantiles) { + writer.double(v); + } + writer.ldelim(); return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): AudioFormatOptions { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SpeechAnalysisOptions { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + const message = { ...baseSpeechAnalysisOptions } as SpeechAnalysisOptions; + message.descriptiveStatisticsQuantiles = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.rawAudio = RawAudio.decode(reader, reader.uint32()); + message.enableSpeakerAnalysis = reader.bool(); break; case 2: - message.containerAudio = ContainerAudio.decode( - reader, - reader.uint32() + message.enableConversationAnalysis = reader.bool(); + break; + case 3: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.descriptiveStatisticsQuantiles.push(reader.double()); + } + } else { + message.descriptiveStatisticsQuantiles.push(reader.double()); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SpeechAnalysisOptions { + const message = { ...baseSpeechAnalysisOptions } as SpeechAnalysisOptions; + message.enableSpeakerAnalysis = + object.enableSpeakerAnalysis !== undefined && + object.enableSpeakerAnalysis !== null + ? Boolean(object.enableSpeakerAnalysis) + : false; + message.enableConversationAnalysis = + object.enableConversationAnalysis !== undefined && + object.enableConversationAnalysis !== null + ? Boolean(object.enableConversationAnalysis) + : false; + message.descriptiveStatisticsQuantiles = ( + object.descriptiveStatisticsQuantiles ?? 
[] + ).map((e: any) => Number(e)); + return message; + }, + + toJSON(message: SpeechAnalysisOptions): unknown { + const obj: any = {}; + message.enableSpeakerAnalysis !== undefined && + (obj.enableSpeakerAnalysis = message.enableSpeakerAnalysis); + message.enableConversationAnalysis !== undefined && + (obj.enableConversationAnalysis = message.enableConversationAnalysis); + if (message.descriptiveStatisticsQuantiles) { + obj.descriptiveStatisticsQuantiles = + message.descriptiveStatisticsQuantiles.map((e) => e); + } else { + obj.descriptiveStatisticsQuantiles = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): SpeechAnalysisOptions { + const message = { ...baseSpeechAnalysisOptions } as SpeechAnalysisOptions; + message.enableSpeakerAnalysis = object.enableSpeakerAnalysis ?? false; + message.enableConversationAnalysis = + object.enableConversationAnalysis ?? false; + message.descriptiveStatisticsQuantiles = + object.descriptiveStatisticsQuantiles?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(SpeechAnalysisOptions.$type, SpeechAnalysisOptions); + +const baseRawAudio: object = { + $type: "speechkit.stt.v3.RawAudio", + audioEncoding: 0, + sampleRateHertz: 0, + audioChannelCount: 0, +}; + +export const RawAudio = { + $type: "speechkit.stt.v3.RawAudio" as const, + + encode( + message: RawAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.audioEncoding !== 0) { + writer.uint32(8).int32(message.audioEncoding); + } + if (message.sampleRateHertz !== 0) { + writer.uint32(16).int64(message.sampleRateHertz); + } + if (message.audioChannelCount !== 0) { + writer.uint32(24).int64(message.audioChannelCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RawAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRawAudio } as RawAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.audioEncoding = reader.int32() as any; + break; + case 2: + message.sampleRateHertz = longToNumber(reader.int64() as Long); + break; + case 3: + message.audioChannelCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = + object.audioEncoding !== undefined && object.audioEncoding !== null + ? rawAudio_AudioEncodingFromJSON(object.audioEncoding) + : 0; + message.sampleRateHertz = + object.sampleRateHertz !== undefined && object.sampleRateHertz !== null + ? Number(object.sampleRateHertz) + : 0; + message.audioChannelCount = + object.audioChannelCount !== undefined && + object.audioChannelCount !== null + ? Number(object.audioChannelCount) + : 0; + return message; + }, + + toJSON(message: RawAudio): unknown { + const obj: any = {}; + message.audioEncoding !== undefined && + (obj.audioEncoding = rawAudio_AudioEncodingToJSON(message.audioEncoding)); + message.sampleRateHertz !== undefined && + (obj.sampleRateHertz = Math.round(message.sampleRateHertz)); + message.audioChannelCount !== undefined && + (obj.audioChannelCount = Math.round(message.audioChannelCount)); + return obj; + }, + + fromPartial, I>>(object: I): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = object.audioEncoding ?? 0; + message.sampleRateHertz = object.sampleRateHertz ?? 0; + message.audioChannelCount = object.audioChannelCount ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(RawAudio.$type, RawAudio); + +const baseContainerAudio: object = { + $type: "speechkit.stt.v3.ContainerAudio", + containerAudioType: 0, +}; + +export const ContainerAudio = { + $type: "speechkit.stt.v3.ContainerAudio" as const, + + encode( + message: ContainerAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.containerAudioType !== 0) { + writer.uint32(8).int32(message.containerAudioType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ContainerAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseContainerAudio } as ContainerAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.containerAudioType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = + object.containerAudioType !== undefined && + object.containerAudioType !== null + ? containerAudio_ContainerAudioTypeFromJSON(object.containerAudioType) + : 0; + return message; + }, + + toJSON(message: ContainerAudio): unknown { + const obj: any = {}; + message.containerAudioType !== undefined && + (obj.containerAudioType = containerAudio_ContainerAudioTypeToJSON( + message.containerAudioType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = object.containerAudioType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ContainerAudio.$type, ContainerAudio); + +const baseAudioFormatOptions: object = { + $type: "speechkit.stt.v3.AudioFormatOptions", +}; + +export const AudioFormatOptions = { + $type: "speechkit.stt.v3.AudioFormatOptions" as const, + + encode( + message: AudioFormatOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.rawAudio !== undefined) { + RawAudio.encode(message.rawAudio, writer.uint32(10).fork()).ldelim(); + } + if (message.containerAudio !== undefined) { + ContainerAudio.encode( + message.containerAudio, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioFormatOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rawAudio = RawAudio.decode(reader, reader.uint32()); + break; + case 2: + message.containerAudio = ContainerAudio.decode( + reader, + reader.uint32() ); break; default: @@ -1447,6 +2135,18 @@ export const StreamingOptions = { writer.uint32(18).fork() ).ldelim(); } + if (message.recognitionClassifier !== undefined) { + RecognitionClassifierOptions.encode( + message.recognitionClassifier, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.speechAnalysis !== undefined) { + SpeechAnalysisOptions.encode( + message.speechAnalysis, + writer.uint32(34).fork() + ).ldelim(); + } return writer; }, @@ -1469,6 +2169,18 @@ export const StreamingOptions = { reader.uint32() ); break; + case 3: + message.recognitionClassifier = RecognitionClassifierOptions.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.speechAnalysis = SpeechAnalysisOptions.decode( + reader, + reader.uint32() + ); + break; 
default: reader.skipType(tag & 7); break; @@ -1487,6 +2199,15 @@ export const StreamingOptions = { object.eouClassifier !== undefined && object.eouClassifier !== null ? EouClassifierOptions.fromJSON(object.eouClassifier) : undefined; + message.recognitionClassifier = + object.recognitionClassifier !== undefined && + object.recognitionClassifier !== null + ? RecognitionClassifierOptions.fromJSON(object.recognitionClassifier) + : undefined; + message.speechAnalysis = + object.speechAnalysis !== undefined && object.speechAnalysis !== null + ? SpeechAnalysisOptions.fromJSON(object.speechAnalysis) + : undefined; return message; }, @@ -1500,6 +2221,14 @@ export const StreamingOptions = { (obj.eouClassifier = message.eouClassifier ? EouClassifierOptions.toJSON(message.eouClassifier) : undefined); + message.recognitionClassifier !== undefined && + (obj.recognitionClassifier = message.recognitionClassifier + ? RecognitionClassifierOptions.toJSON(message.recognitionClassifier) + : undefined); + message.speechAnalysis !== undefined && + (obj.speechAnalysis = message.speechAnalysis + ? SpeechAnalysisOptions.toJSON(message.speechAnalysis) + : undefined); return obj; }, @@ -1515,6 +2244,15 @@ export const StreamingOptions = { object.eouClassifier !== undefined && object.eouClassifier !== null ? EouClassifierOptions.fromPartial(object.eouClassifier) : undefined; + message.recognitionClassifier = + object.recognitionClassifier !== undefined && + object.recognitionClassifier !== null + ? RecognitionClassifierOptions.fromPartial(object.recognitionClassifier) + : undefined; + message.speechAnalysis = + object.speechAnalysis !== undefined && object.speechAnalysis !== null + ? 
SpeechAnalysisOptions.fromPartial(object.speechAnalysis) + : undefined; return message; }, }; @@ -1817,19 +2555,173 @@ export const StreamingRequest = { messageTypeRegistry.set(StreamingRequest.$type, StreamingRequest); -const baseWord: object = { - $type: "speechkit.stt.v3.Word", - text: "", - startTimeMs: 0, - endTimeMs: 0, +const baseRecognizeFileRequest: object = { + $type: "speechkit.stt.v3.RecognizeFileRequest", }; -export const Word = { - $type: "speechkit.stt.v3.Word" as const, +export const RecognizeFileRequest = { + $type: "speechkit.stt.v3.RecognizeFileRequest" as const, - encode(message: Word, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.text !== "") { - writer.uint32(10).string(message.text); + encode( + message: RecognizeFileRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.content !== undefined) { + writer.uint32(10).bytes(message.content); + } + if (message.uri !== undefined) { + writer.uint32(18).string(message.uri); + } + if (message.recognitionModel !== undefined) { + RecognitionModelOptions.encode( + message.recognitionModel, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.recognitionClassifier !== undefined) { + RecognitionClassifierOptions.encode( + message.recognitionClassifier, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.speechAnalysis !== undefined) { + SpeechAnalysisOptions.encode( + message.speechAnalysis, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognizeFileRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRecognizeFileRequest } as RecognizeFileRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.content = reader.bytes() as Buffer; + break; + case 2: + message.uri = reader.string(); + break; + case 3: + message.recognitionModel = RecognitionModelOptions.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.recognitionClassifier = RecognitionClassifierOptions.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.speechAnalysis = SpeechAnalysisOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognizeFileRequest { + const message = { ...baseRecognizeFileRequest } as RecognizeFileRequest; + message.content = + object.content !== undefined && object.content !== null + ? Buffer.from(bytesFromBase64(object.content)) + : undefined; + message.uri = + object.uri !== undefined && object.uri !== null + ? String(object.uri) + : undefined; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromJSON(object.recognitionModel) + : undefined; + message.recognitionClassifier = + object.recognitionClassifier !== undefined && + object.recognitionClassifier !== null + ? RecognitionClassifierOptions.fromJSON(object.recognitionClassifier) + : undefined; + message.speechAnalysis = + object.speechAnalysis !== undefined && object.speechAnalysis !== null + ? SpeechAnalysisOptions.fromJSON(object.speechAnalysis) + : undefined; + return message; + }, + + toJSON(message: RecognizeFileRequest): unknown { + const obj: any = {}; + message.content !== undefined && + (obj.content = + message.content !== undefined + ? 
base64FromBytes(message.content) + : undefined); + message.uri !== undefined && (obj.uri = message.uri); + message.recognitionModel !== undefined && + (obj.recognitionModel = message.recognitionModel + ? RecognitionModelOptions.toJSON(message.recognitionModel) + : undefined); + message.recognitionClassifier !== undefined && + (obj.recognitionClassifier = message.recognitionClassifier + ? RecognitionClassifierOptions.toJSON(message.recognitionClassifier) + : undefined); + message.speechAnalysis !== undefined && + (obj.speechAnalysis = message.speechAnalysis + ? SpeechAnalysisOptions.toJSON(message.speechAnalysis) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognizeFileRequest { + const message = { ...baseRecognizeFileRequest } as RecognizeFileRequest; + message.content = object.content ?? undefined; + message.uri = object.uri ?? undefined; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromPartial(object.recognitionModel) + : undefined; + message.recognitionClassifier = + object.recognitionClassifier !== undefined && + object.recognitionClassifier !== null + ? RecognitionClassifierOptions.fromPartial(object.recognitionClassifier) + : undefined; + message.speechAnalysis = + object.speechAnalysis !== undefined && object.speechAnalysis !== null + ? 
SpeechAnalysisOptions.fromPartial(object.speechAnalysis) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(RecognizeFileRequest.$type, RecognizeFileRequest); + +const baseWord: object = { + $type: "speechkit.stt.v3.Word", + text: "", + startTimeMs: 0, + endTimeMs: 0, +}; + +export const Word = { + $type: "speechkit.stt.v3.Word" as const, + + encode(message: Word, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); } if (message.startTimeMs !== 0) { writer.uint32(16).int64(message.startTimeMs); @@ -1902,6 +2794,83 @@ export const Word = { messageTypeRegistry.set(Word.$type, Word); +const baseLanguageEstimation: object = { + $type: "speechkit.stt.v3.LanguageEstimation", + languageCode: "", + probability: 0, +}; + +export const LanguageEstimation = { + $type: "speechkit.stt.v3.LanguageEstimation" as const, + + encode( + message: LanguageEstimation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.languageCode !== "") { + writer.uint32(10).string(message.languageCode); + } + if (message.probability !== 0) { + writer.uint32(17).double(message.probability); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LanguageEstimation { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLanguageEstimation } as LanguageEstimation; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.languageCode = reader.string(); + break; + case 2: + message.probability = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LanguageEstimation { + const message = { ...baseLanguageEstimation } as LanguageEstimation; + message.languageCode = + object.languageCode !== undefined && object.languageCode !== null + ? String(object.languageCode) + : ""; + message.probability = + object.probability !== undefined && object.probability !== null + ? Number(object.probability) + : 0; + return message; + }, + + toJSON(message: LanguageEstimation): unknown { + const obj: any = {}; + message.languageCode !== undefined && + (obj.languageCode = message.languageCode); + message.probability !== undefined && + (obj.probability = message.probability); + return obj; + }, + + fromPartial, I>>( + object: I + ): LanguageEstimation { + const message = { ...baseLanguageEstimation } as LanguageEstimation; + message.languageCode = object.languageCode ?? ""; + message.probability = object.probability ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(LanguageEstimation.$type, LanguageEstimation); + const baseAlternative: object = { $type: "speechkit.stt.v3.Alternative", text: "", @@ -1932,6 +2901,9 @@ export const Alternative = { if (message.confidence !== 0) { writer.uint32(41).double(message.confidence); } + for (const v of message.languages) { + LanguageEstimation.encode(v!, writer.uint32(50).fork()).ldelim(); + } return writer; }, @@ -1940,6 +2912,7 @@ export const Alternative = { let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseAlternative } as Alternative; message.words = []; + message.languages = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1958,6 +2931,11 @@ export const Alternative = { case 5: message.confidence = reader.double(); break; + case 6: + message.languages.push( + LanguageEstimation.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -1985,6 +2963,9 @@ export const Alternative = { object.confidence !== undefined && object.confidence !== null ? Number(object.confidence) : 0; + message.languages = (object.languages ?? []).map((e: any) => + LanguageEstimation.fromJSON(e) + ); return message; }, @@ -2001,6 +2982,13 @@ export const Alternative = { message.endTimeMs !== undefined && (obj.endTimeMs = Math.round(message.endTimeMs)); message.confidence !== undefined && (obj.confidence = message.confidence); + if (message.languages) { + obj.languages = message.languages.map((e) => + e ? LanguageEstimation.toJSON(e) : undefined + ); + } else { + obj.languages = []; + } return obj; }, @@ -2013,6 +3001,8 @@ export const Alternative = { message.startTimeMs = object.startTimeMs ?? 0; message.endTimeMs = object.endTimeMs ?? 0; message.confidence = object.confidence ?? 
0; + message.languages = + object.languages?.map((e) => LanguageEstimation.fromPartial(e)) || []; return message; }, }; @@ -2536,93 +3526,47 @@ export const SessionUuid = { messageTypeRegistry.set(SessionUuid.$type, SessionUuid); -const baseStreamingResponse: object = { - $type: "speechkit.stt.v3.StreamingResponse", - responseWallTimeMs: 0, +const basePhraseHighlight: object = { + $type: "speechkit.stt.v3.PhraseHighlight", + text: "", + startTimeMs: 0, + endTimeMs: 0, }; -export const StreamingResponse = { - $type: "speechkit.stt.v3.StreamingResponse" as const, +export const PhraseHighlight = { + $type: "speechkit.stt.v3.PhraseHighlight" as const, encode( - message: StreamingResponse, + message: PhraseHighlight, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.sessionUuid !== undefined) { - SessionUuid.encode( - message.sessionUuid, - writer.uint32(10).fork() - ).ldelim(); - } - if (message.audioCursors !== undefined) { - AudioCursors.encode( - message.audioCursors, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.responseWallTimeMs !== 0) { - writer.uint32(24).int64(message.responseWallTimeMs); - } - if (message.partial !== undefined) { - AlternativeUpdate.encode( - message.partial, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.final !== undefined) { - AlternativeUpdate.encode( - message.final, - writer.uint32(42).fork() - ).ldelim(); - } - if (message.eouUpdate !== undefined) { - EouUpdate.encode(message.eouUpdate, writer.uint32(50).fork()).ldelim(); + if (message.text !== "") { + writer.uint32(10).string(message.text); } - if (message.finalRefinement !== undefined) { - FinalRefinement.encode( - message.finalRefinement, - writer.uint32(58).fork() - ).ldelim(); + if (message.startTimeMs !== 0) { + writer.uint32(16).int64(message.startTimeMs); } - if (message.statusCode !== undefined) { - StatusCode.encode(message.statusCode, writer.uint32(66).fork()).ldelim(); + if (message.endTimeMs !== 0) { + 
writer.uint32(24).int64(message.endTimeMs); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): StreamingResponse { + decode(input: _m0.Reader | Uint8Array, length?: number): PhraseHighlight { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseStreamingResponse } as StreamingResponse; + const message = { ...basePhraseHighlight } as PhraseHighlight; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.sessionUuid = SessionUuid.decode(reader, reader.uint32()); + message.text = reader.string(); break; case 2: - message.audioCursors = AudioCursors.decode(reader, reader.uint32()); + message.startTimeMs = longToNumber(reader.int64() as Long); break; case 3: - message.responseWallTimeMs = longToNumber(reader.int64() as Long); - break; - case 4: - message.partial = AlternativeUpdate.decode(reader, reader.uint32()); - break; - case 5: - message.final = AlternativeUpdate.decode(reader, reader.uint32()); - break; - case 6: - message.eouUpdate = EouUpdate.decode(reader, reader.uint32()); - break; - case 7: - message.finalRefinement = FinalRefinement.decode( - reader, - reader.uint32() - ); - break; - case 8: - message.statusCode = StatusCode.decode(reader, reader.uint32()); + message.endTimeMs = longToNumber(reader.int64() as Long); break; default: reader.skipType(tag & 7); @@ -2632,28 +3576,1522 @@ export const StreamingResponse = { return message; }, - fromJSON(object: any): StreamingResponse { - const message = { ...baseStreamingResponse } as StreamingResponse; - message.sessionUuid = - object.sessionUuid !== undefined && object.sessionUuid !== null - ? SessionUuid.fromJSON(object.sessionUuid) - : undefined; - message.audioCursors = - object.audioCursors !== undefined && object.audioCursors !== null - ? 
AudioCursors.fromJSON(object.audioCursors) - : undefined; - message.responseWallTimeMs = - object.responseWallTimeMs !== undefined && - object.responseWallTimeMs !== null - ? Number(object.responseWallTimeMs) + fromJSON(object: any): PhraseHighlight { + const message = { ...basePhraseHighlight } as PhraseHighlight; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) : 0; - message.partial = - object.partial !== undefined && object.partial !== null - ? AlternativeUpdate.fromJSON(object.partial) - : undefined; - message.final = - object.final !== undefined && object.final !== null - ? AlternativeUpdate.fromJSON(object.final) + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + return message; + }, + + toJSON(message: PhraseHighlight): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PhraseHighlight { + const message = { ...basePhraseHighlight } as PhraseHighlight; + message.text = object.text ?? ""; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(PhraseHighlight.$type, PhraseHighlight); + +const baseRecognitionClassifierLabel: object = { + $type: "speechkit.stt.v3.RecognitionClassifierLabel", + label: "", + confidence: 0, +}; + +export const RecognitionClassifierLabel = { + $type: "speechkit.stt.v3.RecognitionClassifierLabel" as const, + + encode( + message: RecognitionClassifierLabel, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.label !== "") { + writer.uint32(10).string(message.label); + } + if (message.confidence !== 0) { + writer.uint32(17).double(message.confidence); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionClassifierLabel { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRecognitionClassifierLabel, + } as RecognitionClassifierLabel; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.label = reader.string(); + break; + case 2: + message.confidence = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognitionClassifierLabel { + const message = { + ...baseRecognitionClassifierLabel, + } as RecognitionClassifierLabel; + message.label = + object.label !== undefined && object.label !== null + ? String(object.label) + : ""; + message.confidence = + object.confidence !== undefined && object.confidence !== null + ? 
Number(object.confidence) + : 0; + return message; + }, + + toJSON(message: RecognitionClassifierLabel): unknown { + const obj: any = {}; + message.label !== undefined && (obj.label = message.label); + message.confidence !== undefined && (obj.confidence = message.confidence); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognitionClassifierLabel { + const message = { + ...baseRecognitionClassifierLabel, + } as RecognitionClassifierLabel; + message.label = object.label ?? ""; + message.confidence = object.confidence ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + RecognitionClassifierLabel.$type, + RecognitionClassifierLabel +); + +const baseRecognitionClassifierResult: object = { + $type: "speechkit.stt.v3.RecognitionClassifierResult", + classifier: "", +}; + +export const RecognitionClassifierResult = { + $type: "speechkit.stt.v3.RecognitionClassifierResult" as const, + + encode( + message: RecognitionClassifierResult, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.classifier !== "") { + writer.uint32(10).string(message.classifier); + } + for (const v of message.highlights) { + PhraseHighlight.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.labels) { + RecognitionClassifierLabel.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionClassifierResult { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRecognitionClassifierResult, + } as RecognitionClassifierResult; + message.highlights = []; + message.labels = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.classifier = reader.string(); + break; + case 2: + message.highlights.push( + PhraseHighlight.decode(reader, reader.uint32()) + ); + break; + case 3: + message.labels.push( + RecognitionClassifierLabel.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognitionClassifierResult { + const message = { + ...baseRecognitionClassifierResult, + } as RecognitionClassifierResult; + message.classifier = + object.classifier !== undefined && object.classifier !== null + ? String(object.classifier) + : ""; + message.highlights = (object.highlights ?? []).map((e: any) => + PhraseHighlight.fromJSON(e) + ); + message.labels = (object.labels ?? []).map((e: any) => + RecognitionClassifierLabel.fromJSON(e) + ); + return message; + }, + + toJSON(message: RecognitionClassifierResult): unknown { + const obj: any = {}; + message.classifier !== undefined && (obj.classifier = message.classifier); + if (message.highlights) { + obj.highlights = message.highlights.map((e) => + e ? PhraseHighlight.toJSON(e) : undefined + ); + } else { + obj.highlights = []; + } + if (message.labels) { + obj.labels = message.labels.map((e) => + e ? RecognitionClassifierLabel.toJSON(e) : undefined + ); + } else { + obj.labels = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognitionClassifierResult { + const message = { + ...baseRecognitionClassifierResult, + } as RecognitionClassifierResult; + message.classifier = object.classifier ?? 
""; + message.highlights = + object.highlights?.map((e) => PhraseHighlight.fromPartial(e)) || []; + message.labels = + object.labels?.map((e) => RecognitionClassifierLabel.fromPartial(e)) || + []; + return message; + }, +}; + +messageTypeRegistry.set( + RecognitionClassifierResult.$type, + RecognitionClassifierResult +); + +const baseRecognitionClassifierUpdate: object = { + $type: "speechkit.stt.v3.RecognitionClassifierUpdate", + windowType: 0, + startTimeMs: 0, + endTimeMs: 0, +}; + +export const RecognitionClassifierUpdate = { + $type: "speechkit.stt.v3.RecognitionClassifierUpdate" as const, + + encode( + message: RecognitionClassifierUpdate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.windowType !== 0) { + writer.uint32(8).int32(message.windowType); + } + if (message.startTimeMs !== 0) { + writer.uint32(16).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(24).int64(message.endTimeMs); + } + if (message.classifierResult !== undefined) { + RecognitionClassifierResult.encode( + message.classifierResult, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionClassifierUpdate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRecognitionClassifierUpdate, + } as RecognitionClassifierUpdate; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.windowType = reader.int32() as any; + break; + case 2: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 3: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.classifierResult = RecognitionClassifierResult.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognitionClassifierUpdate { + const message = { + ...baseRecognitionClassifierUpdate, + } as RecognitionClassifierUpdate; + message.windowType = + object.windowType !== undefined && object.windowType !== null + ? recognitionClassifierUpdate_WindowTypeFromJSON(object.windowType) + : 0; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + message.classifierResult = + object.classifierResult !== undefined && object.classifierResult !== null + ? RecognitionClassifierResult.fromJSON(object.classifierResult) + : undefined; + return message; + }, + + toJSON(message: RecognitionClassifierUpdate): unknown { + const obj: any = {}; + message.windowType !== undefined && + (obj.windowType = recognitionClassifierUpdate_WindowTypeToJSON( + message.windowType + )); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + message.classifierResult !== undefined && + (obj.classifierResult = message.classifierResult + ? 
RecognitionClassifierResult.toJSON(message.classifierResult) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognitionClassifierUpdate { + const message = { + ...baseRecognitionClassifierUpdate, + } as RecognitionClassifierUpdate; + message.windowType = object.windowType ?? 0; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 0; + message.classifierResult = + object.classifierResult !== undefined && object.classifierResult !== null + ? RecognitionClassifierResult.fromPartial(object.classifierResult) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + RecognitionClassifierUpdate.$type, + RecognitionClassifierUpdate +); + +const baseDescriptiveStatistics: object = { + $type: "speechkit.stt.v3.DescriptiveStatistics", + min: 0, + max: 0, + mean: 0, + std: 0, +}; + +export const DescriptiveStatistics = { + $type: "speechkit.stt.v3.DescriptiveStatistics" as const, + + encode( + message: DescriptiveStatistics, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.min !== 0) { + writer.uint32(9).double(message.min); + } + if (message.max !== 0) { + writer.uint32(17).double(message.max); + } + if (message.mean !== 0) { + writer.uint32(25).double(message.mean); + } + if (message.std !== 0) { + writer.uint32(33).double(message.std); + } + for (const v of message.quantiles) { + DescriptiveStatistics_Quantile.encode( + v!, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DescriptiveStatistics { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDescriptiveStatistics } as DescriptiveStatistics; + message.quantiles = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.min = reader.double(); + break; + case 2: + message.max = reader.double(); + break; + case 3: + message.mean = reader.double(); + break; + case 4: + message.std = reader.double(); + break; + case 5: + message.quantiles.push( + DescriptiveStatistics_Quantile.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DescriptiveStatistics { + const message = { ...baseDescriptiveStatistics } as DescriptiveStatistics; + message.min = + object.min !== undefined && object.min !== null ? Number(object.min) : 0; + message.max = + object.max !== undefined && object.max !== null ? Number(object.max) : 0; + message.mean = + object.mean !== undefined && object.mean !== null + ? Number(object.mean) + : 0; + message.std = + object.std !== undefined && object.std !== null ? Number(object.std) : 0; + message.quantiles = (object.quantiles ?? []).map((e: any) => + DescriptiveStatistics_Quantile.fromJSON(e) + ); + return message; + }, + + toJSON(message: DescriptiveStatistics): unknown { + const obj: any = {}; + message.min !== undefined && (obj.min = message.min); + message.max !== undefined && (obj.max = message.max); + message.mean !== undefined && (obj.mean = message.mean); + message.std !== undefined && (obj.std = message.std); + if (message.quantiles) { + obj.quantiles = message.quantiles.map((e) => + e ? DescriptiveStatistics_Quantile.toJSON(e) : undefined + ); + } else { + obj.quantiles = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): DescriptiveStatistics { + const message = { ...baseDescriptiveStatistics } as DescriptiveStatistics; + message.min = object.min ?? 0; + message.max = object.max ?? 0; + message.mean = object.mean ?? 
0; + message.std = object.std ?? 0; + message.quantiles = + object.quantiles?.map((e) => + DescriptiveStatistics_Quantile.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(DescriptiveStatistics.$type, DescriptiveStatistics); + +const baseDescriptiveStatistics_Quantile: object = { + $type: "speechkit.stt.v3.DescriptiveStatistics.Quantile", + level: 0, + value: 0, +}; + +export const DescriptiveStatistics_Quantile = { + $type: "speechkit.stt.v3.DescriptiveStatistics.Quantile" as const, + + encode( + message: DescriptiveStatistics_Quantile, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.level !== 0) { + writer.uint32(9).double(message.level); + } + if (message.value !== 0) { + writer.uint32(17).double(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DescriptiveStatistics_Quantile { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDescriptiveStatistics_Quantile, + } as DescriptiveStatistics_Quantile; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.level = reader.double(); + break; + case 2: + message.value = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DescriptiveStatistics_Quantile { + const message = { + ...baseDescriptiveStatistics_Quantile, + } as DescriptiveStatistics_Quantile; + message.level = + object.level !== undefined && object.level !== null + ? Number(object.level) + : 0; + message.value = + object.value !== undefined && object.value !== null + ? 
Number(object.value) + : 0; + return message; + }, + + toJSON(message: DescriptiveStatistics_Quantile): unknown { + const obj: any = {}; + message.level !== undefined && (obj.level = message.level); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): DescriptiveStatistics_Quantile { + const message = { + ...baseDescriptiveStatistics_Quantile, + } as DescriptiveStatistics_Quantile; + message.level = object.level ?? 0; + message.value = object.value ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + DescriptiveStatistics_Quantile.$type, + DescriptiveStatistics_Quantile +); + +const baseAudioSegmentBoundaries: object = { + $type: "speechkit.stt.v3.AudioSegmentBoundaries", + startTimeMs: 0, + endTimeMs: 0, +}; + +export const AudioSegmentBoundaries = { + $type: "speechkit.stt.v3.AudioSegmentBoundaries" as const, + + encode( + message: AudioSegmentBoundaries, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.startTimeMs !== 0) { + writer.uint32(8).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(16).int64(message.endTimeMs); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AudioSegmentBoundaries { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAudioSegmentBoundaries } as AudioSegmentBoundaries; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 2: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioSegmentBoundaries { + const message = { ...baseAudioSegmentBoundaries } as AudioSegmentBoundaries; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + return message; + }, + + toJSON(message: AudioSegmentBoundaries): unknown { + const obj: any = {}; + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioSegmentBoundaries { + const message = { ...baseAudioSegmentBoundaries } as AudioSegmentBoundaries; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(AudioSegmentBoundaries.$type, AudioSegmentBoundaries); + +const baseSpeakerAnalysis: object = { + $type: "speechkit.stt.v3.SpeakerAnalysis", + speakerTag: "", + windowType: 0, + totalSpeechMs: 0, + speechRatio: 0, + totalSilenceMs: 0, + silenceRatio: 0, + wordsCount: 0, + lettersCount: 0, + utteranceCount: 0, +}; + +export const SpeakerAnalysis = { + $type: "speechkit.stt.v3.SpeakerAnalysis" as const, + + encode( + message: SpeakerAnalysis, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.speakerTag !== "") { + writer.uint32(10).string(message.speakerTag); + } + if (message.windowType !== 0) { + writer.uint32(16).int32(message.windowType); + } + if (message.speechBoundaries !== undefined) { + AudioSegmentBoundaries.encode( + message.speechBoundaries, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.totalSpeechMs !== 0) { + writer.uint32(32).int64(message.totalSpeechMs); + } + if (message.speechRatio !== 0) { + writer.uint32(41).double(message.speechRatio); + } + if (message.totalSilenceMs !== 0) { + writer.uint32(48).int64(message.totalSilenceMs); + } + if (message.silenceRatio !== 0) { + writer.uint32(57).double(message.silenceRatio); + } + if (message.wordsCount !== 0) { + writer.uint32(64).int64(message.wordsCount); + } + if (message.lettersCount !== 0) { + writer.uint32(72).int64(message.lettersCount); + } + if (message.wordsPerSecond !== undefined) { + DescriptiveStatistics.encode( + message.wordsPerSecond, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.lettersPerSecond !== undefined) { + DescriptiveStatistics.encode( + message.lettersPerSecond, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.wordsPerUtterance !== undefined) { + DescriptiveStatistics.encode( + message.wordsPerUtterance, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.lettersPerUtterance !== undefined) { + DescriptiveStatistics.encode( + message.lettersPerUtterance, + 
writer.uint32(106).fork() + ).ldelim(); + } + if (message.utteranceCount !== 0) { + writer.uint32(112).int64(message.utteranceCount); + } + if (message.utteranceDurationEstimation !== undefined) { + DescriptiveStatistics.encode( + message.utteranceDurationEstimation, + writer.uint32(122).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SpeakerAnalysis { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSpeakerAnalysis } as SpeakerAnalysis; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.speakerTag = reader.string(); + break; + case 2: + message.windowType = reader.int32() as any; + break; + case 3: + message.speechBoundaries = AudioSegmentBoundaries.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.totalSpeechMs = longToNumber(reader.int64() as Long); + break; + case 5: + message.speechRatio = reader.double(); + break; + case 6: + message.totalSilenceMs = longToNumber(reader.int64() as Long); + break; + case 7: + message.silenceRatio = reader.double(); + break; + case 8: + message.wordsCount = longToNumber(reader.int64() as Long); + break; + case 9: + message.lettersCount = longToNumber(reader.int64() as Long); + break; + case 10: + message.wordsPerSecond = DescriptiveStatistics.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.lettersPerSecond = DescriptiveStatistics.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.wordsPerUtterance = DescriptiveStatistics.decode( + reader, + reader.uint32() + ); + break; + case 13: + message.lettersPerUtterance = DescriptiveStatistics.decode( + reader, + reader.uint32() + ); + break; + case 14: + message.utteranceCount = longToNumber(reader.int64() as Long); + break; + case 15: + message.utteranceDurationEstimation = 
DescriptiveStatistics.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SpeakerAnalysis { + const message = { ...baseSpeakerAnalysis } as SpeakerAnalysis; + message.speakerTag = + object.speakerTag !== undefined && object.speakerTag !== null + ? String(object.speakerTag) + : ""; + message.windowType = + object.windowType !== undefined && object.windowType !== null + ? speakerAnalysis_WindowTypeFromJSON(object.windowType) + : 0; + message.speechBoundaries = + object.speechBoundaries !== undefined && object.speechBoundaries !== null + ? AudioSegmentBoundaries.fromJSON(object.speechBoundaries) + : undefined; + message.totalSpeechMs = + object.totalSpeechMs !== undefined && object.totalSpeechMs !== null + ? Number(object.totalSpeechMs) + : 0; + message.speechRatio = + object.speechRatio !== undefined && object.speechRatio !== null + ? Number(object.speechRatio) + : 0; + message.totalSilenceMs = + object.totalSilenceMs !== undefined && object.totalSilenceMs !== null + ? Number(object.totalSilenceMs) + : 0; + message.silenceRatio = + object.silenceRatio !== undefined && object.silenceRatio !== null + ? Number(object.silenceRatio) + : 0; + message.wordsCount = + object.wordsCount !== undefined && object.wordsCount !== null + ? Number(object.wordsCount) + : 0; + message.lettersCount = + object.lettersCount !== undefined && object.lettersCount !== null + ? Number(object.lettersCount) + : 0; + message.wordsPerSecond = + object.wordsPerSecond !== undefined && object.wordsPerSecond !== null + ? DescriptiveStatistics.fromJSON(object.wordsPerSecond) + : undefined; + message.lettersPerSecond = + object.lettersPerSecond !== undefined && object.lettersPerSecond !== null + ? DescriptiveStatistics.fromJSON(object.lettersPerSecond) + : undefined; + message.wordsPerUtterance = + object.wordsPerUtterance !== undefined && + object.wordsPerUtterance !== null + ? 
DescriptiveStatistics.fromJSON(object.wordsPerUtterance) + : undefined; + message.lettersPerUtterance = + object.lettersPerUtterance !== undefined && + object.lettersPerUtterance !== null + ? DescriptiveStatistics.fromJSON(object.lettersPerUtterance) + : undefined; + message.utteranceCount = + object.utteranceCount !== undefined && object.utteranceCount !== null + ? Number(object.utteranceCount) + : 0; + message.utteranceDurationEstimation = + object.utteranceDurationEstimation !== undefined && + object.utteranceDurationEstimation !== null + ? DescriptiveStatistics.fromJSON(object.utteranceDurationEstimation) + : undefined; + return message; + }, + + toJSON(message: SpeakerAnalysis): unknown { + const obj: any = {}; + message.speakerTag !== undefined && (obj.speakerTag = message.speakerTag); + message.windowType !== undefined && + (obj.windowType = speakerAnalysis_WindowTypeToJSON(message.windowType)); + message.speechBoundaries !== undefined && + (obj.speechBoundaries = message.speechBoundaries + ? AudioSegmentBoundaries.toJSON(message.speechBoundaries) + : undefined); + message.totalSpeechMs !== undefined && + (obj.totalSpeechMs = Math.round(message.totalSpeechMs)); + message.speechRatio !== undefined && + (obj.speechRatio = message.speechRatio); + message.totalSilenceMs !== undefined && + (obj.totalSilenceMs = Math.round(message.totalSilenceMs)); + message.silenceRatio !== undefined && + (obj.silenceRatio = message.silenceRatio); + message.wordsCount !== undefined && + (obj.wordsCount = Math.round(message.wordsCount)); + message.lettersCount !== undefined && + (obj.lettersCount = Math.round(message.lettersCount)); + message.wordsPerSecond !== undefined && + (obj.wordsPerSecond = message.wordsPerSecond + ? DescriptiveStatistics.toJSON(message.wordsPerSecond) + : undefined); + message.lettersPerSecond !== undefined && + (obj.lettersPerSecond = message.lettersPerSecond + ? 
DescriptiveStatistics.toJSON(message.lettersPerSecond) + : undefined); + message.wordsPerUtterance !== undefined && + (obj.wordsPerUtterance = message.wordsPerUtterance + ? DescriptiveStatistics.toJSON(message.wordsPerUtterance) + : undefined); + message.lettersPerUtterance !== undefined && + (obj.lettersPerUtterance = message.lettersPerUtterance + ? DescriptiveStatistics.toJSON(message.lettersPerUtterance) + : undefined); + message.utteranceCount !== undefined && + (obj.utteranceCount = Math.round(message.utteranceCount)); + message.utteranceDurationEstimation !== undefined && + (obj.utteranceDurationEstimation = message.utteranceDurationEstimation + ? DescriptiveStatistics.toJSON(message.utteranceDurationEstimation) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SpeakerAnalysis { + const message = { ...baseSpeakerAnalysis } as SpeakerAnalysis; + message.speakerTag = object.speakerTag ?? ""; + message.windowType = object.windowType ?? 0; + message.speechBoundaries = + object.speechBoundaries !== undefined && object.speechBoundaries !== null + ? AudioSegmentBoundaries.fromPartial(object.speechBoundaries) + : undefined; + message.totalSpeechMs = object.totalSpeechMs ?? 0; + message.speechRatio = object.speechRatio ?? 0; + message.totalSilenceMs = object.totalSilenceMs ?? 0; + message.silenceRatio = object.silenceRatio ?? 0; + message.wordsCount = object.wordsCount ?? 0; + message.lettersCount = object.lettersCount ?? 0; + message.wordsPerSecond = + object.wordsPerSecond !== undefined && object.wordsPerSecond !== null + ? DescriptiveStatistics.fromPartial(object.wordsPerSecond) + : undefined; + message.lettersPerSecond = + object.lettersPerSecond !== undefined && object.lettersPerSecond !== null + ? DescriptiveStatistics.fromPartial(object.lettersPerSecond) + : undefined; + message.wordsPerUtterance = + object.wordsPerUtterance !== undefined && + object.wordsPerUtterance !== null + ? 
DescriptiveStatistics.fromPartial(object.wordsPerUtterance) + : undefined; + message.lettersPerUtterance = + object.lettersPerUtterance !== undefined && + object.lettersPerUtterance !== null + ? DescriptiveStatistics.fromPartial(object.lettersPerUtterance) + : undefined; + message.utteranceCount = object.utteranceCount ?? 0; + message.utteranceDurationEstimation = + object.utteranceDurationEstimation !== undefined && + object.utteranceDurationEstimation !== null + ? DescriptiveStatistics.fromPartial(object.utteranceDurationEstimation) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(SpeakerAnalysis.$type, SpeakerAnalysis); + +const baseConversationAnalysis: object = { + $type: "speechkit.stt.v3.ConversationAnalysis", + totalSimultaneousSilenceDurationMs: 0, + totalSimultaneousSilenceRatio: 0, + totalSimultaneousSpeechDurationMs: 0, + totalSimultaneousSpeechRatio: 0, +}; + +export const ConversationAnalysis = { + $type: "speechkit.stt.v3.ConversationAnalysis" as const, + + encode( + message: ConversationAnalysis, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.conversationBoundaries !== undefined) { + AudioSegmentBoundaries.encode( + message.conversationBoundaries, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.totalSimultaneousSilenceDurationMs !== 0) { + writer.uint32(16).int64(message.totalSimultaneousSilenceDurationMs); + } + if (message.totalSimultaneousSilenceRatio !== 0) { + writer.uint32(25).double(message.totalSimultaneousSilenceRatio); + } + if (message.simultaneousSilenceDurationEstimation !== undefined) { + DescriptiveStatistics.encode( + message.simultaneousSilenceDurationEstimation, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.totalSimultaneousSpeechDurationMs !== 0) { + writer.uint32(40).int64(message.totalSimultaneousSpeechDurationMs); + } + if (message.totalSimultaneousSpeechRatio !== 0) { + writer.uint32(49).double(message.totalSimultaneousSpeechRatio); + } + if 
(message.simultaneousSpeechDurationEstimation !== undefined) { + DescriptiveStatistics.encode( + message.simultaneousSpeechDurationEstimation, + writer.uint32(58).fork() + ).ldelim(); + } + for (const v of message.speakerInterrupts) { + ConversationAnalysis_InterruptsEvaluation.encode( + v!, + writer.uint32(66).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ConversationAnalysis { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseConversationAnalysis } as ConversationAnalysis; + message.speakerInterrupts = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.conversationBoundaries = AudioSegmentBoundaries.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.totalSimultaneousSilenceDurationMs = longToNumber( + reader.int64() as Long + ); + break; + case 3: + message.totalSimultaneousSilenceRatio = reader.double(); + break; + case 4: + message.simultaneousSilenceDurationEstimation = + DescriptiveStatistics.decode(reader, reader.uint32()); + break; + case 5: + message.totalSimultaneousSpeechDurationMs = longToNumber( + reader.int64() as Long + ); + break; + case 6: + message.totalSimultaneousSpeechRatio = reader.double(); + break; + case 7: + message.simultaneousSpeechDurationEstimation = + DescriptiveStatistics.decode(reader, reader.uint32()); + break; + case 8: + message.speakerInterrupts.push( + ConversationAnalysis_InterruptsEvaluation.decode( + reader, + reader.uint32() + ) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConversationAnalysis { + const message = { ...baseConversationAnalysis } as ConversationAnalysis; + message.conversationBoundaries = + object.conversationBoundaries !== undefined && + object.conversationBoundaries !== 
null + ? AudioSegmentBoundaries.fromJSON(object.conversationBoundaries) + : undefined; + message.totalSimultaneousSilenceDurationMs = + object.totalSimultaneousSilenceDurationMs !== undefined && + object.totalSimultaneousSilenceDurationMs !== null + ? Number(object.totalSimultaneousSilenceDurationMs) + : 0; + message.totalSimultaneousSilenceRatio = + object.totalSimultaneousSilenceRatio !== undefined && + object.totalSimultaneousSilenceRatio !== null + ? Number(object.totalSimultaneousSilenceRatio) + : 0; + message.simultaneousSilenceDurationEstimation = + object.simultaneousSilenceDurationEstimation !== undefined && + object.simultaneousSilenceDurationEstimation !== null + ? DescriptiveStatistics.fromJSON( + object.simultaneousSilenceDurationEstimation + ) + : undefined; + message.totalSimultaneousSpeechDurationMs = + object.totalSimultaneousSpeechDurationMs !== undefined && + object.totalSimultaneousSpeechDurationMs !== null + ? Number(object.totalSimultaneousSpeechDurationMs) + : 0; + message.totalSimultaneousSpeechRatio = + object.totalSimultaneousSpeechRatio !== undefined && + object.totalSimultaneousSpeechRatio !== null + ? Number(object.totalSimultaneousSpeechRatio) + : 0; + message.simultaneousSpeechDurationEstimation = + object.simultaneousSpeechDurationEstimation !== undefined && + object.simultaneousSpeechDurationEstimation !== null + ? DescriptiveStatistics.fromJSON( + object.simultaneousSpeechDurationEstimation + ) + : undefined; + message.speakerInterrupts = (object.speakerInterrupts ?? []).map((e: any) => + ConversationAnalysis_InterruptsEvaluation.fromJSON(e) + ); + return message; + }, + + toJSON(message: ConversationAnalysis): unknown { + const obj: any = {}; + message.conversationBoundaries !== undefined && + (obj.conversationBoundaries = message.conversationBoundaries + ? 
AudioSegmentBoundaries.toJSON(message.conversationBoundaries) + : undefined); + message.totalSimultaneousSilenceDurationMs !== undefined && + (obj.totalSimultaneousSilenceDurationMs = Math.round( + message.totalSimultaneousSilenceDurationMs + )); + message.totalSimultaneousSilenceRatio !== undefined && + (obj.totalSimultaneousSilenceRatio = + message.totalSimultaneousSilenceRatio); + message.simultaneousSilenceDurationEstimation !== undefined && + (obj.simultaneousSilenceDurationEstimation = + message.simultaneousSilenceDurationEstimation + ? DescriptiveStatistics.toJSON( + message.simultaneousSilenceDurationEstimation + ) + : undefined); + message.totalSimultaneousSpeechDurationMs !== undefined && + (obj.totalSimultaneousSpeechDurationMs = Math.round( + message.totalSimultaneousSpeechDurationMs + )); + message.totalSimultaneousSpeechRatio !== undefined && + (obj.totalSimultaneousSpeechRatio = message.totalSimultaneousSpeechRatio); + message.simultaneousSpeechDurationEstimation !== undefined && + (obj.simultaneousSpeechDurationEstimation = + message.simultaneousSpeechDurationEstimation + ? DescriptiveStatistics.toJSON( + message.simultaneousSpeechDurationEstimation + ) + : undefined); + if (message.speakerInterrupts) { + obj.speakerInterrupts = message.speakerInterrupts.map((e) => + e ? ConversationAnalysis_InterruptsEvaluation.toJSON(e) : undefined + ); + } else { + obj.speakerInterrupts = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ConversationAnalysis { + const message = { ...baseConversationAnalysis } as ConversationAnalysis; + message.conversationBoundaries = + object.conversationBoundaries !== undefined && + object.conversationBoundaries !== null + ? AudioSegmentBoundaries.fromPartial(object.conversationBoundaries) + : undefined; + message.totalSimultaneousSilenceDurationMs = + object.totalSimultaneousSilenceDurationMs ?? 0; + message.totalSimultaneousSilenceRatio = + object.totalSimultaneousSilenceRatio ?? 
0; + message.simultaneousSilenceDurationEstimation = + object.simultaneousSilenceDurationEstimation !== undefined && + object.simultaneousSilenceDurationEstimation !== null + ? DescriptiveStatistics.fromPartial( + object.simultaneousSilenceDurationEstimation + ) + : undefined; + message.totalSimultaneousSpeechDurationMs = + object.totalSimultaneousSpeechDurationMs ?? 0; + message.totalSimultaneousSpeechRatio = + object.totalSimultaneousSpeechRatio ?? 0; + message.simultaneousSpeechDurationEstimation = + object.simultaneousSpeechDurationEstimation !== undefined && + object.simultaneousSpeechDurationEstimation !== null + ? DescriptiveStatistics.fromPartial( + object.simultaneousSpeechDurationEstimation + ) + : undefined; + message.speakerInterrupts = + object.speakerInterrupts?.map((e) => + ConversationAnalysis_InterruptsEvaluation.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(ConversationAnalysis.$type, ConversationAnalysis); + +const baseConversationAnalysis_InterruptsEvaluation: object = { + $type: "speechkit.stt.v3.ConversationAnalysis.InterruptsEvaluation", + speakerTag: "", + interruptsCount: 0, + interruptsDurationMs: 0, +}; + +export const ConversationAnalysis_InterruptsEvaluation = { + $type: "speechkit.stt.v3.ConversationAnalysis.InterruptsEvaluation" as const, + + encode( + message: ConversationAnalysis_InterruptsEvaluation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.speakerTag !== "") { + writer.uint32(10).string(message.speakerTag); + } + if (message.interruptsCount !== 0) { + writer.uint32(16).int64(message.interruptsCount); + } + if (message.interruptsDurationMs !== 0) { + writer.uint32(24).int64(message.interruptsDurationMs); + } + for (const v of message.interrupts) { + AudioSegmentBoundaries.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ConversationAnalysis_InterruptsEvaluation { + const 
reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseConversationAnalysis_InterruptsEvaluation, + } as ConversationAnalysis_InterruptsEvaluation; + message.interrupts = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.speakerTag = reader.string(); + break; + case 2: + message.interruptsCount = longToNumber(reader.int64() as Long); + break; + case 3: + message.interruptsDurationMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.interrupts.push( + AudioSegmentBoundaries.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConversationAnalysis_InterruptsEvaluation { + const message = { + ...baseConversationAnalysis_InterruptsEvaluation, + } as ConversationAnalysis_InterruptsEvaluation; + message.speakerTag = + object.speakerTag !== undefined && object.speakerTag !== null + ? String(object.speakerTag) + : ""; + message.interruptsCount = + object.interruptsCount !== undefined && object.interruptsCount !== null + ? Number(object.interruptsCount) + : 0; + message.interruptsDurationMs = + object.interruptsDurationMs !== undefined && + object.interruptsDurationMs !== null + ? Number(object.interruptsDurationMs) + : 0; + message.interrupts = (object.interrupts ?? 
[]).map((e: any) => + AudioSegmentBoundaries.fromJSON(e) + ); + return message; + }, + + toJSON(message: ConversationAnalysis_InterruptsEvaluation): unknown { + const obj: any = {}; + message.speakerTag !== undefined && (obj.speakerTag = message.speakerTag); + message.interruptsCount !== undefined && + (obj.interruptsCount = Math.round(message.interruptsCount)); + message.interruptsDurationMs !== undefined && + (obj.interruptsDurationMs = Math.round(message.interruptsDurationMs)); + if (message.interrupts) { + obj.interrupts = message.interrupts.map((e) => + e ? AudioSegmentBoundaries.toJSON(e) : undefined + ); + } else { + obj.interrupts = []; + } + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ConversationAnalysis_InterruptsEvaluation { + const message = { + ...baseConversationAnalysis_InterruptsEvaluation, + } as ConversationAnalysis_InterruptsEvaluation; + message.speakerTag = object.speakerTag ?? ""; + message.interruptsCount = object.interruptsCount ?? 0; + message.interruptsDurationMs = object.interruptsDurationMs ?? 
0; + message.interrupts = + object.interrupts?.map((e) => AudioSegmentBoundaries.fromPartial(e)) || + []; + return message; + }, +}; + +messageTypeRegistry.set( + ConversationAnalysis_InterruptsEvaluation.$type, + ConversationAnalysis_InterruptsEvaluation +); + +const baseStreamingResponse: object = { + $type: "speechkit.stt.v3.StreamingResponse", + responseWallTimeMs: 0, + channelTag: "", +}; + +export const StreamingResponse = { + $type: "speechkit.stt.v3.StreamingResponse" as const, + + encode( + message: StreamingResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sessionUuid !== undefined) { + SessionUuid.encode( + message.sessionUuid, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.audioCursors !== undefined) { + AudioCursors.encode( + message.audioCursors, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.responseWallTimeMs !== 0) { + writer.uint32(24).int64(message.responseWallTimeMs); + } + if (message.partial !== undefined) { + AlternativeUpdate.encode( + message.partial, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.final !== undefined) { + AlternativeUpdate.encode( + message.final, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.eouUpdate !== undefined) { + EouUpdate.encode(message.eouUpdate, writer.uint32(50).fork()).ldelim(); + } + if (message.finalRefinement !== undefined) { + FinalRefinement.encode( + message.finalRefinement, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.statusCode !== undefined) { + StatusCode.encode(message.statusCode, writer.uint32(66).fork()).ldelim(); + } + if (message.classifierUpdate !== undefined) { + RecognitionClassifierUpdate.encode( + message.classifierUpdate, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.speakerAnalysis !== undefined) { + SpeakerAnalysis.encode( + message.speakerAnalysis, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.conversationAnalysis !== undefined) { + ConversationAnalysis.encode( + 
message.conversationAnalysis, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.channelTag !== "") { + writer.uint32(74).string(message.channelTag); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamingResponse } as StreamingResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionUuid = SessionUuid.decode(reader, reader.uint32()); + break; + case 2: + message.audioCursors = AudioCursors.decode(reader, reader.uint32()); + break; + case 3: + message.responseWallTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.partial = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 5: + message.final = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 6: + message.eouUpdate = EouUpdate.decode(reader, reader.uint32()); + break; + case 7: + message.finalRefinement = FinalRefinement.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.statusCode = StatusCode.decode(reader, reader.uint32()); + break; + case 10: + message.classifierUpdate = RecognitionClassifierUpdate.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.speakerAnalysis = SpeakerAnalysis.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.conversationAnalysis = ConversationAnalysis.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.channelTag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingResponse { + const message = { ...baseStreamingResponse } as StreamingResponse; + message.sessionUuid = + object.sessionUuid !== undefined && object.sessionUuid !== null + ? 
SessionUuid.fromJSON(object.sessionUuid) + : undefined; + message.audioCursors = + object.audioCursors !== undefined && object.audioCursors !== null + ? AudioCursors.fromJSON(object.audioCursors) + : undefined; + message.responseWallTimeMs = + object.responseWallTimeMs !== undefined && + object.responseWallTimeMs !== null + ? Number(object.responseWallTimeMs) + : 0; + message.partial = + object.partial !== undefined && object.partial !== null + ? AlternativeUpdate.fromJSON(object.partial) + : undefined; + message.final = + object.final !== undefined && object.final !== null + ? AlternativeUpdate.fromJSON(object.final) : undefined; message.eouUpdate = object.eouUpdate !== undefined && object.eouUpdate !== null @@ -2667,6 +5105,23 @@ export const StreamingResponse = { object.statusCode !== undefined && object.statusCode !== null ? StatusCode.fromJSON(object.statusCode) : undefined; + message.classifierUpdate = + object.classifierUpdate !== undefined && object.classifierUpdate !== null + ? RecognitionClassifierUpdate.fromJSON(object.classifierUpdate) + : undefined; + message.speakerAnalysis = + object.speakerAnalysis !== undefined && object.speakerAnalysis !== null + ? SpeakerAnalysis.fromJSON(object.speakerAnalysis) + : undefined; + message.conversationAnalysis = + object.conversationAnalysis !== undefined && + object.conversationAnalysis !== null + ? ConversationAnalysis.fromJSON(object.conversationAnalysis) + : undefined; + message.channelTag = + object.channelTag !== undefined && object.channelTag !== null + ? String(object.channelTag) + : ""; return message; }, @@ -2702,6 +5157,19 @@ export const StreamingResponse = { (obj.statusCode = message.statusCode ? StatusCode.toJSON(message.statusCode) : undefined); + message.classifierUpdate !== undefined && + (obj.classifierUpdate = message.classifierUpdate + ? 
RecognitionClassifierUpdate.toJSON(message.classifierUpdate) + : undefined); + message.speakerAnalysis !== undefined && + (obj.speakerAnalysis = message.speakerAnalysis + ? SpeakerAnalysis.toJSON(message.speakerAnalysis) + : undefined); + message.conversationAnalysis !== undefined && + (obj.conversationAnalysis = message.conversationAnalysis + ? ConversationAnalysis.toJSON(message.conversationAnalysis) + : undefined); + message.channelTag !== undefined && (obj.channelTag = message.channelTag); return obj; }, @@ -2738,6 +5206,20 @@ export const StreamingResponse = { object.statusCode !== undefined && object.statusCode !== null ? StatusCode.fromPartial(object.statusCode) : undefined; + message.classifierUpdate = + object.classifierUpdate !== undefined && object.classifierUpdate !== null + ? RecognitionClassifierUpdate.fromPartial(object.classifierUpdate) + : undefined; + message.speakerAnalysis = + object.speakerAnalysis !== undefined && object.speakerAnalysis !== null + ? SpeakerAnalysis.fromPartial(object.speakerAnalysis) + : undefined; + message.conversationAnalysis = + object.conversationAnalysis !== undefined && + object.conversationAnalysis !== null + ? ConversationAnalysis.fromPartial(object.conversationAnalysis) + : undefined; + message.channelTag = object.channelTag ?? 
""; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts index 56c6cc74..ae84357f 100644 --- a/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts +++ b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts @@ -1,4 +1,5 @@ /* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import { makeGenericClientConstructor, @@ -10,15 +11,93 @@ import { ClientDuplexStream, CallOptions, Metadata, + handleUnaryCall, + handleServerStreamingCall, + ClientUnaryCall, + ClientReadableStream, + ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; import { StreamingRequest, StreamingResponse, + RecognizeFileRequest, } from "../../../../../yandex/cloud/ai/stt/v3/stt"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; export const protobufPackage = "speechkit.stt.v3"; +export interface GetRecognitionRequest { + $type: "speechkit.stt.v3.GetRecognitionRequest"; + operationId: string; +} + +const baseGetRecognitionRequest: object = { + $type: "speechkit.stt.v3.GetRecognitionRequest", + operationId: "", +}; + +export const GetRecognitionRequest = { + $type: "speechkit.stt.v3.GetRecognitionRequest" as const, + + encode( + message: GetRecognitionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.operationId !== "") { + writer.uint32(10).string(message.operationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetRecognitionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRecognitionRequest { + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + message.operationId = + object.operationId !== undefined && object.operationId !== null + ? String(object.operationId) + : ""; + return message; + }, + + toJSON(message: GetRecognitionRequest): unknown { + const obj: any = {}; + message.operationId !== undefined && + (obj.operationId = message.operationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRecognitionRequest { + const message = { ...baseGetRecognitionRequest } as GetRecognitionRequest; + message.operationId = object.operationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetRecognitionRequest.$type, GetRecognitionRequest); + /** A set of methods for voice recognition. */ export const RecognizerService = { /** Expects audio in real-time */ @@ -67,6 +146,106 @@ export const RecognizerClient = makeGenericClientConstructor( service: typeof RecognizerService; }; +/** A set of methods for async voice recognition. 
*/ +export const AsyncRecognizerService = { + recognizeFile: { + path: "/speechkit.stt.v3.AsyncRecognizer/RecognizeFile", + requestStream: false, + responseStream: false, + requestSerialize: (value: RecognizeFileRequest) => + Buffer.from(RecognizeFileRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RecognizeFileRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + getRecognition: { + path: "/speechkit.stt.v3.AsyncRecognizer/GetRecognition", + requestStream: false, + responseStream: true, + requestSerialize: (value: GetRecognitionRequest) => + Buffer.from(GetRecognitionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetRecognitionRequest.decode(value), + responseSerialize: (value: StreamingResponse) => + Buffer.from(StreamingResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => StreamingResponse.decode(value), + }, +} as const; + +export interface AsyncRecognizerServer extends UntypedServiceImplementation { + recognizeFile: handleUnaryCall; + getRecognition: handleServerStreamingCall< + GetRecognitionRequest, + StreamingResponse + >; +} + +export interface AsyncRecognizerClient extends Client { + recognizeFile( + request: RecognizeFileRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + recognizeFile( + request: RecognizeFileRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + recognizeFile( + request: RecognizeFileRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + getRecognition( + request: GetRecognitionRequest, + options?: Partial + ): ClientReadableStream; + getRecognition( + request: GetRecognitionRequest, + metadata?: Metadata, + options?: 
Partial + ): ClientReadableStream; +} + +export const AsyncRecognizerClient = makeGenericClientConstructor( + AsyncRecognizerService, + "speechkit.stt.v3.AsyncRecognizer" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AsyncRecognizerClient; + service: typeof AsyncRecognizerService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/ai/tts/v3/tts.ts b/src/generated/yandex/cloud/ai/tts/v3/tts.ts index b671fde6..0af2be42 100644 --- a/src/generated/yandex/cloud/ai/tts/v3/tts.ts +++ b/src/generated/yandex/cloud/ai/tts/v3/tts.ts @@ -194,6 +194,8 @@ export interface Hints { volume: number | undefined; /** Hint to specify pronunciation character for the speaker. */ role: string | undefined; + /** Hint to increase (or decrease) speaker's pitch, measured in Hz. Valid values are in range [-1000;1000], default value is 0. 
*/ + pitchShift: number | undefined; } export interface UtteranceSynthesisRequest { @@ -1104,6 +1106,9 @@ export const Hints = { if (message.role !== undefined) { writer.uint32(42).string(message.role); } + if (message.pitchShift !== undefined) { + writer.uint32(49).double(message.pitchShift); + } return writer; }, @@ -1129,6 +1134,9 @@ export const Hints = { case 5: message.role = reader.string(); break; + case 6: + message.pitchShift = reader.double(); + break; default: reader.skipType(tag & 7); break; @@ -1159,6 +1167,10 @@ export const Hints = { object.role !== undefined && object.role !== null ? String(object.role) : undefined; + message.pitchShift = + object.pitchShift !== undefined && object.pitchShift !== null + ? Number(object.pitchShift) + : undefined; return message; }, @@ -1172,6 +1184,7 @@ export const Hints = { message.speed !== undefined && (obj.speed = message.speed); message.volume !== undefined && (obj.volume = message.volume); message.role !== undefined && (obj.role = message.role); + message.pitchShift !== undefined && (obj.pitchShift = message.pitchShift); return obj; }, @@ -1185,6 +1198,7 @@ export const Hints = { message.speed = object.speed ?? undefined; message.volume = object.volume ?? undefined; message.role = object.role ?? undefined; + message.pitchShift = object.pitchShift ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts b/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts index 25ccaf67..101dcdbe 100644 --- a/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts +++ b/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts @@ -28,7 +28,7 @@ export interface BatchAnalyzeRequest { * A list of specifications. Each specification contains the file to analyze and features to use for analysis. * * Restrictions: - * * Supported file formats: JPEG, PNG. + * * Supported file formats: `JPEG`, `PNG`. * * Maximum file size: 1 MB. 
* * Image size should not exceed 20M pixels (length x width). */ @@ -133,14 +133,14 @@ export interface FeatureTextDetectionConfig { $type: "yandex.cloud.ai.vision.v1.FeatureTextDetectionConfig"; /** * List of the languages to recognize text. - * Specified in [ISO 639-1](https://en.wikipedia.org/wiki/ISO_639-1) format (for example, `` ru ``). + * Specified in [ISO 639-1](https://en.wikipedia.org/wiki/ISO_639-1) format (for example, `ru`). */ languageCodes: string[]; /** * Model to use for text detection. * Possible values: - * * page (default) - this model is suitable for detecting multiple text entries in an image. - * * line - this model is suitable for cropped images with one line of text. + * * `page` (default): this model is suitable for detecting multiple text entries in an image. + * * `line`: this model is suitable for cropped images with one line of text. */ model: string; } diff --git a/src/generated/yandex/cloud/apploadbalancer/index.ts b/src/generated/yandex/cloud/apploadbalancer/index.ts index 1f7e7fe6..2f8de2fe 100644 --- a/src/generated/yandex/cloud/apploadbalancer/index.ts +++ b/src/generated/yandex/cloud/apploadbalancer/index.ts @@ -4,6 +4,7 @@ export * as http_router from './v1/http_router' export * as http_router_service from './v1/http_router_service' export * as load_balancer from './v1/load_balancer' export * as load_balancer_service from './v1/load_balancer_service' +export * as logging from './v1/logging' export * as payload from './v1/payload' export * as target_group from './v1/target_group' export * as target_group_service from './v1/target_group_service' diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/http_router_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/http_router_service.ts index 3a707ecc..6d5c843b 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/http_router_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/http_router_service.ts @@ -15,8 +15,11 @@ import { } from "@grpc/grpc-js"; 
import _m0 from "protobufjs/minimal"; import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { + RouteOptions, + VirtualHost, +} from "../../../../yandex/cloud/apploadbalancer/v1/virtual_host"; import { HttpRouter } from "../../../../yandex/cloud/apploadbalancer/v1/http_router"; -import { VirtualHost } from "../../../../yandex/cloud/apploadbalancer/v1/virtual_host"; import { Operation } from "../../../../yandex/cloud/operation/operation"; export const protobufPackage = "yandex.cloud.apploadbalancer.v1"; @@ -131,6 +134,8 @@ export interface UpdateHttpRouterRequest { * a virtual host, make a [VirtualHostService.Create] request or a [VirtualHostService.Delete] request. */ virtualHosts: VirtualHost[]; + /** New route options for the HTTP router. */ + routeOptions?: RouteOptions; } export interface UpdateHttpRouterRequest_LabelsEntry { @@ -172,6 +177,8 @@ export interface CreateHttpRouterRequest { * Only one virtual host with no authority (default match) can be specified. */ virtualHosts: VirtualHost[]; + /** Route options for the HTTP router. */ + routeOptions?: RouteOptions; } export interface CreateHttpRouterRequest_LabelsEntry { @@ -671,6 +678,12 @@ export const UpdateHttpRouterRequest = { for (const v of message.virtualHosts) { VirtualHost.encode(v!, writer.uint32(50).fork()).ldelim(); } + if (message.routeOptions !== undefined) { + RouteOptions.encode( + message.routeOptions, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -714,6 +727,9 @@ export const UpdateHttpRouterRequest = { VirtualHost.decode(reader, reader.uint32()) ); break; + case 8: + message.routeOptions = RouteOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -751,6 +767,10 @@ export const UpdateHttpRouterRequest = { message.virtualHosts = (object.virtualHosts ?? []).map((e: any) => VirtualHost.fromJSON(e) ); + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? 
RouteOptions.fromJSON(object.routeOptions) + : undefined; return message; }, @@ -778,6 +798,10 @@ export const UpdateHttpRouterRequest = { } else { obj.virtualHosts = []; } + message.routeOptions !== undefined && + (obj.routeOptions = message.routeOptions + ? RouteOptions.toJSON(message.routeOptions) + : undefined); return obj; }, @@ -804,6 +828,10 @@ export const UpdateHttpRouterRequest = { }, {}); message.virtualHosts = object.virtualHosts?.map((e) => VirtualHost.fromPartial(e)) || []; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromPartial(object.routeOptions) + : undefined; return message; }, }; @@ -1008,6 +1036,12 @@ export const CreateHttpRouterRequest = { for (const v of message.virtualHosts) { VirtualHost.encode(v!, writer.uint32(42).fork()).ldelim(); } + if (message.routeOptions !== undefined) { + RouteOptions.encode( + message.routeOptions, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1048,6 +1082,9 @@ export const CreateHttpRouterRequest = { VirtualHost.decode(reader, reader.uint32()) ); break; + case 7: + message.routeOptions = RouteOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1081,6 +1118,10 @@ export const CreateHttpRouterRequest = { message.virtualHosts = (object.virtualHosts ?? []).map((e: any) => VirtualHost.fromJSON(e) ); + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromJSON(object.routeOptions) + : undefined; return message; }, @@ -1103,6 +1144,10 @@ export const CreateHttpRouterRequest = { } else { obj.virtualHosts = []; } + message.routeOptions !== undefined && + (obj.routeOptions = message.routeOptions + ? 
RouteOptions.toJSON(message.routeOptions) + : undefined); return obj; }, @@ -1125,6 +1170,10 @@ export const CreateHttpRouterRequest = { }, {}); message.virtualHosts = object.virtualHosts?.map((e) => VirtualHost.fromPartial(e)) || []; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromPartial(object.routeOptions) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts index 31b86332..2deca4f3 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts @@ -2,6 +2,7 @@ import { messageTypeRegistry } from "../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; +import { LogOptions } from "../../../../yandex/cloud/apploadbalancer/v1/logging"; import { Target } from "../../../../yandex/cloud/apploadbalancer/v1/target_group"; import { Timestamp } from "../../../../google/protobuf/timestamp"; @@ -59,6 +60,19 @@ export interface LoadBalancer { securityGroupIds: string[]; /** Creation timestamp. */ createdAt?: Date; + /** + * Scaling settings of the application load balancer. + * + * The scaling settings relate to a special internal instance group which facilitates the balancer's work. + * Instances in this group are called _resource units_. The group is scaled automatically based on incoming load + * and within limitations specified in these settings. + * + * For details about the concept, + * see [documentation](/docs/application-load-balancer/concepts/application-load-balancer#lcu-scaling). + */ + autoScalePolicy?: AutoScalePolicy; + /** Cloud logging settings of the application load balancer. 
*/ + logOptions?: LogOptions; } export enum LoadBalancer_Status { @@ -324,6 +338,8 @@ export interface HttpHandler { http2Options?: Http2Options | undefined; /** Enables support for incoming HTTP/1.0 and HTTP/1.1 requests and disables it for HTTP/2 requests. */ allowHttp10: boolean | undefined; + /** When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. */ + rewriteRequestId: boolean; } /** A listener redirects resource. */ @@ -470,6 +486,29 @@ export interface TargetState_ZoneHealthcheckStatus { failedActiveHc: boolean; } +/** A resource for scaling settings of an application load balancer. */ +export interface AutoScalePolicy { + $type: "yandex.cloud.apploadbalancer.v1.AutoScalePolicy"; + /** + * Lower limit for the number of resource units in each availability zone. + * + * If not specified previously (using other instruments such as management console), the default value is 2. + * To revert to it, specify it explicitly. + * + * The minimum value is 2. + */ + minZoneSize: number; + /** + * Upper limit for the total number of resource units across all availability zones. + * + * If a positive value is specified, it must be at least [min_zone_size] multiplied by the size of + * [AllocationPolicy.locations]. + * + * If the value is 0, there is no upper limit. 
+ */ + maxSize: number; +} + const baseLoadBalancer: object = { $type: "yandex.cloud.apploadbalancer.v1.LoadBalancer", id: "", @@ -542,6 +581,15 @@ export const LoadBalancer = { writer.uint32(106).fork() ).ldelim(); } + if (message.autoScalePolicy !== undefined) { + AutoScalePolicy.encode( + message.autoScalePolicy, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -605,6 +653,15 @@ export const LoadBalancer = { Timestamp.decode(reader, reader.uint32()) ); break; + case 14: + message.autoScalePolicy = AutoScalePolicy.decode( + reader, + reader.uint32() + ); + break; + case 15: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -665,6 +722,14 @@ export const LoadBalancer = { object.createdAt !== undefined && object.createdAt !== null ? fromJsonTimestamp(object.createdAt) : undefined; + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromJSON(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; return message; }, @@ -704,6 +769,14 @@ export const LoadBalancer = { } message.createdAt !== undefined && (obj.createdAt = message.createdAt.toISOString()); + message.autoScalePolicy !== undefined && + (obj.autoScalePolicy = message.autoScalePolicy + ? AutoScalePolicy.toJSON(message.autoScalePolicy) + : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); return obj; }, @@ -735,6 +808,14 @@ export const LoadBalancer = { message.logGroupId = object.logGroupId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.createdAt = object.createdAt ?? 
undefined; + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromPartial(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; return message; }, }; @@ -1895,6 +1976,7 @@ messageTypeRegistry.set(StreamHandler.$type, StreamHandler); const baseHttpHandler: object = { $type: "yandex.cloud.apploadbalancer.v1.HttpHandler", httpRouterId: "", + rewriteRequestId: false, }; export const HttpHandler = { @@ -1916,6 +1998,9 @@ export const HttpHandler = { if (message.allowHttp10 !== undefined) { writer.uint32(24).bool(message.allowHttp10); } + if (message.rewriteRequestId === true) { + writer.uint32(32).bool(message.rewriteRequestId); + } return writer; }, @@ -1935,6 +2020,9 @@ export const HttpHandler = { case 3: message.allowHttp10 = reader.bool(); break; + case 4: + message.rewriteRequestId = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1957,6 +2045,10 @@ export const HttpHandler = { object.allowHttp10 !== undefined && object.allowHttp10 !== null ? Boolean(object.allowHttp10) : undefined; + message.rewriteRequestId = + object.rewriteRequestId !== undefined && object.rewriteRequestId !== null + ? Boolean(object.rewriteRequestId) + : false; return message; }, @@ -1970,6 +2062,8 @@ export const HttpHandler = { : undefined); message.allowHttp10 !== undefined && (obj.allowHttp10 = message.allowHttp10); + message.rewriteRequestId !== undefined && + (obj.rewriteRequestId = message.rewriteRequestId); return obj; }, @@ -1983,6 +2077,7 @@ export const HttpHandler = { ? Http2Options.fromPartial(object.http2Options) : undefined; message.allowHttp10 = object.allowHttp10 ?? undefined; + message.rewriteRequestId = object.rewriteRequestId ?? 
false; return message; }, }; @@ -2533,6 +2628,83 @@ messageTypeRegistry.set( TargetState_ZoneHealthcheckStatus ); +const baseAutoScalePolicy: object = { + $type: "yandex.cloud.apploadbalancer.v1.AutoScalePolicy", + minZoneSize: 0, + maxSize: 0, +}; + +export const AutoScalePolicy = { + $type: "yandex.cloud.apploadbalancer.v1.AutoScalePolicy" as const, + + encode( + message: AutoScalePolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.minZoneSize !== 0) { + writer.uint32(8).int64(message.minZoneSize); + } + if (message.maxSize !== 0) { + writer.uint32(16).int64(message.maxSize); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AutoScalePolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAutoScalePolicy } as AutoScalePolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minZoneSize = longToNumber(reader.int64() as Long); + break; + case 2: + message.maxSize = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AutoScalePolicy { + const message = { ...baseAutoScalePolicy } as AutoScalePolicy; + message.minZoneSize = + object.minZoneSize !== undefined && object.minZoneSize !== null + ? Number(object.minZoneSize) + : 0; + message.maxSize = + object.maxSize !== undefined && object.maxSize !== null + ? 
Number(object.maxSize) + : 0; + return message; + }, + + toJSON(message: AutoScalePolicy): unknown { + const obj: any = {}; + message.minZoneSize !== undefined && + (obj.minZoneSize = Math.round(message.minZoneSize)); + message.maxSize !== undefined && + (obj.maxSize = Math.round(message.maxSize)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AutoScalePolicy { + const message = { ...baseAutoScalePolicy } as AutoScalePolicy; + message.minZoneSize = object.minZoneSize ?? 0; + message.maxSize = object.maxSize ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(AutoScalePolicy.$type, AutoScalePolicy); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts index 2c45bc1c..bc74c3e4 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts @@ -17,6 +17,7 @@ import _m0 from "protobufjs/minimal"; import { FieldMask } from "../../../../google/protobuf/field_mask"; import { AllocationPolicy, + AutoScalePolicy, TlsHandler, LoadBalancer, HttpListener, @@ -24,6 +25,7 @@ import { StreamListener, TargetState, } from "../../../../yandex/cloud/apploadbalancer/v1/load_balancer"; +import { LogOptions } from "../../../../yandex/cloud/apploadbalancer/v1/logging"; import { Operation } from "../../../../yandex/cloud/operation/operation"; export const protobufPackage = "yandex.cloud.apploadbalancer.v1"; @@ -165,6 +167,19 @@ export interface UpdateLoadBalancerRequest { * 3. Send the new set in this field. */ securityGroupIds: string[]; + /** + * New scaling settings of the application load balancer. + * + * The scaling settings relate to a special internal instance group which facilitates the balancer's work. 
+ * Instances in this group are called _resource units_. The group is scaled automatically based on incoming load + * and within limitations specified in these settings. + * + * For details about the concept, + * see [documentation](/docs/application-load-balancer/concepts/application-load-balancer#lcu-scaling). + */ + autoScalePolicy?: AutoScalePolicy; + /** Cloud logging settings of the application load balancer. */ + logOptions?: LogOptions; } export interface UpdateLoadBalancerRequest_LabelsEntry { @@ -228,6 +243,19 @@ export interface CreateLoadBalancerRequest { * see [documentation](/docs/application-load-balancer/concepts/application-load-balancer#security-groups). */ securityGroupIds: string[]; + /** + * Scaling settings of the application load balancer. + * + * The scaling settings relate to a special internal instance group which facilitates the balancer's work. + * Instances in this group are called _resource units_. The group is scaled automatically based on incoming load + * and within limitations specified in these settings. + * + * For details about the concept, + * see [documentation](/docs/application-load-balancer/concepts/application-load-balancer#lcu-scaling). + */ + autoScalePolicy?: AutoScalePolicy; + /** Cloud logging settings of the application load balancer. 
*/ + logOptions?: LogOptions; } export interface CreateLoadBalancerRequest_LabelsEntry { @@ -1000,6 +1028,15 @@ export const UpdateLoadBalancerRequest = { for (const v of message.securityGroupIds) { writer.uint32(66).string(v!); } + if (message.autoScalePolicy !== undefined) { + AutoScalePolicy.encode( + message.autoScalePolicy, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(82).fork()).ldelim(); + } return writer; }, @@ -1053,6 +1090,15 @@ export const UpdateLoadBalancerRequest = { case 8: message.securityGroupIds.push(reader.string()); break; + case 9: + message.autoScalePolicy = AutoScalePolicy.decode( + reader, + reader.uint32() + ); + break; + case 10: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1097,6 +1143,14 @@ export const UpdateLoadBalancerRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromJSON(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; return message; }, @@ -1133,6 +1187,14 @@ export const UpdateLoadBalancerRequest = { } else { obj.securityGroupIds = []; } + message.autoScalePolicy !== undefined && + (obj.autoScalePolicy = message.autoScalePolicy + ? AutoScalePolicy.toJSON(message.autoScalePolicy) + : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); return obj; }, @@ -1164,6 +1226,14 @@ export const UpdateLoadBalancerRequest = { ? 
AllocationPolicy.fromPartial(object.allocationPolicy) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromPartial(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; return message; }, }; @@ -1390,6 +1460,15 @@ export const CreateLoadBalancerRequest = { for (const v of message.securityGroupIds) { writer.uint32(74).string(v!); } + if (message.autoScalePolicy !== undefined) { + AutoScalePolicy.encode( + message.autoScalePolicy, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(90).fork()).ldelim(); + } return writer; }, @@ -1446,6 +1525,15 @@ export const CreateLoadBalancerRequest = { case 9: message.securityGroupIds.push(reader.string()); break; + case 10: + message.autoScalePolicy = AutoScalePolicy.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1494,6 +1582,14 @@ export const CreateLoadBalancerRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromJSON(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; return message; }, @@ -1527,6 +1623,14 @@ export const CreateLoadBalancerRequest = { } else { obj.securityGroupIds = []; } + message.autoScalePolicy !== undefined && + (obj.autoScalePolicy = message.autoScalePolicy + ? 
AutoScalePolicy.toJSON(message.autoScalePolicy) + : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); return obj; }, @@ -1556,6 +1660,14 @@ export const CreateLoadBalancerRequest = { ? AllocationPolicy.fromPartial(object.allocationPolicy) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.autoScalePolicy = + object.autoScalePolicy !== undefined && object.autoScalePolicy !== null + ? AutoScalePolicy.fromPartial(object.autoScalePolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/logging.ts b/src/generated/yandex/cloud/apploadbalancer/v1/logging.ts new file mode 100644 index 00000000..69a91942 --- /dev/null +++ b/src/generated/yandex/cloud/apploadbalancer/v1/logging.ts @@ -0,0 +1,392 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Code, codeFromJSON, codeToJSON } from "../../../../google/rpc/code"; +import { Int64Value } from "../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.apploadbalancer.v1"; + +export enum HttpCodeInterval { + HTTP_CODE_INTERVAL_UNSPECIFIED = 0, + HTTP_1XX = 1, + HTTP_2XX = 2, + HTTP_3XX = 3, + HTTP_4XX = 4, + HTTP_5XX = 5, + HTTP_ALL = 6, + UNRECOGNIZED = -1, +} + +export function httpCodeIntervalFromJSON(object: any): HttpCodeInterval { + switch (object) { + case 0: + case "HTTP_CODE_INTERVAL_UNSPECIFIED": + return HttpCodeInterval.HTTP_CODE_INTERVAL_UNSPECIFIED; + case 1: + case "HTTP_1XX": + return HttpCodeInterval.HTTP_1XX; + case 2: + case "HTTP_2XX": + return HttpCodeInterval.HTTP_2XX; + case 3: + case "HTTP_3XX": + return HttpCodeInterval.HTTP_3XX; + 
case 4: + case "HTTP_4XX": + return HttpCodeInterval.HTTP_4XX; + case 5: + case "HTTP_5XX": + return HttpCodeInterval.HTTP_5XX; + case 6: + case "HTTP_ALL": + return HttpCodeInterval.HTTP_ALL; + case -1: + case "UNRECOGNIZED": + default: + return HttpCodeInterval.UNRECOGNIZED; + } +} + +export function httpCodeIntervalToJSON(object: HttpCodeInterval): string { + switch (object) { + case HttpCodeInterval.HTTP_CODE_INTERVAL_UNSPECIFIED: + return "HTTP_CODE_INTERVAL_UNSPECIFIED"; + case HttpCodeInterval.HTTP_1XX: + return "HTTP_1XX"; + case HttpCodeInterval.HTTP_2XX: + return "HTTP_2XX"; + case HttpCodeInterval.HTTP_3XX: + return "HTTP_3XX"; + case HttpCodeInterval.HTTP_4XX: + return "HTTP_4XX"; + case HttpCodeInterval.HTTP_5XX: + return "HTTP_5XX"; + case HttpCodeInterval.HTTP_ALL: + return "HTTP_ALL"; + default: + return "UNKNOWN"; + } +} + +/** + * LogDiscardRule discards a fraction of logs with certain codes. + * If neither codes or intervals are provided, rule applies to all logs. + */ +export interface LogDiscardRule { + $type: "yandex.cloud.apploadbalancer.v1.LogDiscardRule"; + /** HTTP codes that should be discarded. */ + httpCodes: number[]; + /** Groups of HTTP codes like 4xx that should be discarded. */ + httpCodeIntervals: HttpCodeInterval[]; + /** GRPC codes that should be discarded */ + grpcCodes: Code[]; + /** Percent of logs to be discarded: 0 - keep all, 100 or unset - discard all */ + discardPercent?: number; +} + +export interface LogOptions { + $type: "yandex.cloud.apploadbalancer.v1.LogOptions"; + /** + * Cloud Logging log group ID to store access logs. + * If not set then logs will be stored in default log group for the folder + * where load balancer located. + */ + logGroupId: string; + /** ordered list of rules, first matching rule applies */ + discardRules: LogDiscardRule[]; + /** Do not send logs to Cloud Logging log group. 
*/ + disable: boolean; +} + +const baseLogDiscardRule: object = { + $type: "yandex.cloud.apploadbalancer.v1.LogDiscardRule", + httpCodes: 0, + httpCodeIntervals: 0, + grpcCodes: 0, +}; + +export const LogDiscardRule = { + $type: "yandex.cloud.apploadbalancer.v1.LogDiscardRule" as const, + + encode( + message: LogDiscardRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + writer.uint32(10).fork(); + for (const v of message.httpCodes) { + writer.int64(v); + } + writer.ldelim(); + writer.uint32(18).fork(); + for (const v of message.httpCodeIntervals) { + writer.int32(v); + } + writer.ldelim(); + writer.uint32(26).fork(); + for (const v of message.grpcCodes) { + writer.int32(v); + } + writer.ldelim(); + if (message.discardPercent !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.discardPercent! }, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogDiscardRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogDiscardRule } as LogDiscardRule; + message.httpCodes = []; + message.httpCodeIntervals = []; + message.grpcCodes = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.httpCodes.push(longToNumber(reader.int64() as Long)); + } + } else { + message.httpCodes.push(longToNumber(reader.int64() as Long)); + } + break; + case 2: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.httpCodeIntervals.push(reader.int32() as any); + } + } else { + message.httpCodeIntervals.push(reader.int32() as any); + } + break; + case 3: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.grpcCodes.push(reader.int32() as any); + } + } else { + message.grpcCodes.push(reader.int32() as any); + } + break; + case 4: + message.discardPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogDiscardRule { + const message = { ...baseLogDiscardRule } as LogDiscardRule; + message.httpCodes = (object.httpCodes ?? []).map((e: any) => Number(e)); + message.httpCodeIntervals = (object.httpCodeIntervals ?? []).map((e: any) => + httpCodeIntervalFromJSON(e) + ); + message.grpcCodes = (object.grpcCodes ?? []).map((e: any) => + codeFromJSON(e) + ); + message.discardPercent = + object.discardPercent !== undefined && object.discardPercent !== null + ? 
Number(object.discardPercent) + : undefined; + return message; + }, + + toJSON(message: LogDiscardRule): unknown { + const obj: any = {}; + if (message.httpCodes) { + obj.httpCodes = message.httpCodes.map((e) => Math.round(e)); + } else { + obj.httpCodes = []; + } + if (message.httpCodeIntervals) { + obj.httpCodeIntervals = message.httpCodeIntervals.map((e) => + httpCodeIntervalToJSON(e) + ); + } else { + obj.httpCodeIntervals = []; + } + if (message.grpcCodes) { + obj.grpcCodes = message.grpcCodes.map((e) => codeToJSON(e)); + } else { + obj.grpcCodes = []; + } + message.discardPercent !== undefined && + (obj.discardPercent = message.discardPercent); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogDiscardRule { + const message = { ...baseLogDiscardRule } as LogDiscardRule; + message.httpCodes = object.httpCodes?.map((e) => e) || []; + message.httpCodeIntervals = object.httpCodeIntervals?.map((e) => e) || []; + message.grpcCodes = object.grpcCodes?.map((e) => e) || []; + message.discardPercent = object.discardPercent ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(LogDiscardRule.$type, LogDiscardRule); + +const baseLogOptions: object = { + $type: "yandex.cloud.apploadbalancer.v1.LogOptions", + logGroupId: "", + disable: false, +}; + +export const LogOptions = { + $type: "yandex.cloud.apploadbalancer.v1.LogOptions" as const, + + encode( + message: LogOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.logGroupId !== "") { + writer.uint32(10).string(message.logGroupId); + } + for (const v of message.discardRules) { + LogDiscardRule.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.disable === true) { + writer.uint32(24).bool(message.disable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogOptions } as LogOptions; + message.discardRules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.logGroupId = reader.string(); + break; + case 2: + message.discardRules.push( + LogDiscardRule.decode(reader, reader.uint32()) + ); + break; + case 3: + message.disable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.logGroupId = + object.logGroupId !== undefined && object.logGroupId !== null + ? String(object.logGroupId) + : ""; + message.discardRules = (object.discardRules ?? []).map((e: any) => + LogDiscardRule.fromJSON(e) + ); + message.disable = + object.disable !== undefined && object.disable !== null + ? Boolean(object.disable) + : false; + return message; + }, + + toJSON(message: LogOptions): unknown { + const obj: any = {}; + message.logGroupId !== undefined && (obj.logGroupId = message.logGroupId); + if (message.discardRules) { + obj.discardRules = message.discardRules.map((e) => + e ? LogDiscardRule.toJSON(e) : undefined + ); + } else { + obj.discardRules = []; + } + message.disable !== undefined && (obj.disable = message.disable); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.logGroupId = object.logGroupId ?? ""; + message.discardRules = + object.discardRules?.map((e) => LogDiscardRule.fromPartial(e)) || []; + message.disable = object.disable ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(LogOptions.$type, LogOptions); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts index 9be8db55..bbd25699 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts @@ -54,6 +54,93 @@ export interface RouteOptions { modifyRequestHeaders: HeaderModification[]; /** Apply the following modifications to the response headers. */ modifyResponseHeaders: HeaderModification[]; + rbac?: RBAC; + /** Security profile that will take effect to all requests routed via particular virtual host. 
*/ + securityProfileId: string; +} + +/** + * Role Based Access Control (RBAC) provides router, virtual host, and route access control for the ALB + * service. Requests are allowed or denied based on the `action` and whether a matching principal is + * found. For instance, if the action is ALLOW and a matching principal is found the request should be + * allowed. + */ +export interface RBAC { + $type: "yandex.cloud.apploadbalancer.v1.RBAC"; + /** The action to take if a principal matches. Every action either allows or denies a request. */ + action: RBAC_Action; + /** Required. A match occurs when at least one matches the request. */ + principals: Principals[]; +} + +export enum RBAC_Action { + ACTION_UNSPECIFIED = 0, + /** ALLOW - Allows the request if and only if there is a principal that matches the request. */ + ALLOW = 1, + /** DENY - Allows the request if and only if there are no principal that match the request. */ + DENY = 2, + UNRECOGNIZED = -1, +} + +export function rBAC_ActionFromJSON(object: any): RBAC_Action { + switch (object) { + case 0: + case "ACTION_UNSPECIFIED": + return RBAC_Action.ACTION_UNSPECIFIED; + case 1: + case "ALLOW": + return RBAC_Action.ALLOW; + case 2: + case "DENY": + return RBAC_Action.DENY; + case -1: + case "UNRECOGNIZED": + default: + return RBAC_Action.UNRECOGNIZED; + } +} + +export function rBAC_ActionToJSON(object: RBAC_Action): string { + switch (object) { + case RBAC_Action.ACTION_UNSPECIFIED: + return "ACTION_UNSPECIFIED"; + case RBAC_Action.ALLOW: + return "ALLOW"; + case RBAC_Action.DENY: + return "DENY"; + default: + return "UNKNOWN"; + } +} + +/** Principals define a group of identities for a request. */ +export interface Principals { + $type: "yandex.cloud.apploadbalancer.v1.Principals"; + /** Required. A match occurs when all principals match the request. */ + andPrincipals: Principal[]; +} + +/** Principal defines an identity for a request. 
*/ +export interface Principal { + $type: "yandex.cloud.apploadbalancer.v1.Principal"; + /** A header (or pseudo-header such as :path or :method) of the incoming HTTP request. */ + header?: Principal_HeaderMatcher | undefined; + /** A CIDR block or IP that describes the request remote/origin address, e.g. ``192.0.0.0/24`` or``192.0.0.4`` . */ + remoteIp: string | undefined; + /** When any is set, it matches any request. */ + any: boolean | undefined; +} + +export interface Principal_HeaderMatcher { + $type: "yandex.cloud.apploadbalancer.v1.Principal.HeaderMatcher"; + /** Specifies the name of the header in the request. */ + name: string; + /** + * Specifies how the header match will be performed to route the request. + * In the absence of value a request that has specified header name will match, + * regardless of the header's value. + */ + value?: StringMatch; } /** A header modification resource. */ @@ -394,7 +481,7 @@ export interface HttpRouteAction { /** * Replacement for the path prefix matched by [StringMatch]. * - * For instance, if [StringMatch.prefix_match] value is `/foo` and `replace_prefix` value is `/bar`, + * For instance, if [StringMatch.prefix_match] value is `/foo` and `prefix_rewrite` value is `/bar`, * a request with `/foobaz` path is forwarded with `/barbaz` path. * For [StringMatch.exact_match], the whole path is replaced. 
* @@ -600,6 +687,7 @@ messageTypeRegistry.set(VirtualHost.$type, VirtualHost); const baseRouteOptions: object = { $type: "yandex.cloud.apploadbalancer.v1.RouteOptions", + securityProfileId: "", }; export const RouteOptions = { @@ -615,6 +703,12 @@ export const RouteOptions = { for (const v of message.modifyResponseHeaders) { HeaderModification.encode(v!, writer.uint32(18).fork()).ldelim(); } + if (message.rbac !== undefined) { + RBAC.encode(message.rbac, writer.uint32(26).fork()).ldelim(); + } + if (message.securityProfileId !== "") { + writer.uint32(34).string(message.securityProfileId); + } return writer; }, @@ -637,6 +731,12 @@ export const RouteOptions = { HeaderModification.decode(reader, reader.uint32()) ); break; + case 3: + message.rbac = RBAC.decode(reader, reader.uint32()); + break; + case 4: + message.securityProfileId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -653,6 +753,15 @@ export const RouteOptions = { message.modifyResponseHeaders = (object.modifyResponseHeaders ?? []).map( (e: any) => HeaderModification.fromJSON(e) ); + message.rbac = + object.rbac !== undefined && object.rbac !== null + ? RBAC.fromJSON(object.rbac) + : undefined; + message.securityProfileId = + object.securityProfileId !== undefined && + object.securityProfileId !== null + ? String(object.securityProfileId) + : ""; return message; }, @@ -672,6 +781,10 @@ export const RouteOptions = { } else { obj.modifyResponseHeaders = []; } + message.rbac !== undefined && + (obj.rbac = message.rbac ? RBAC.toJSON(message.rbac) : undefined); + message.securityProfileId !== undefined && + (obj.securityProfileId = message.securityProfileId); return obj; }, @@ -687,12 +800,348 @@ export const RouteOptions = { object.modifyResponseHeaders?.map((e) => HeaderModification.fromPartial(e) ) || []; + message.rbac = + object.rbac !== undefined && object.rbac !== null + ? RBAC.fromPartial(object.rbac) + : undefined; + message.securityProfileId = object.securityProfileId ?? 
""; return message; }, }; messageTypeRegistry.set(RouteOptions.$type, RouteOptions); +const baseRBAC: object = { + $type: "yandex.cloud.apploadbalancer.v1.RBAC", + action: 0, +}; + +export const RBAC = { + $type: "yandex.cloud.apploadbalancer.v1.RBAC" as const, + + encode(message: RBAC, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.action !== 0) { + writer.uint32(8).int32(message.action); + } + for (const v of message.principals) { + Principals.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RBAC { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRBAC } as RBAC; + message.principals = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.action = reader.int32() as any; + break; + case 2: + message.principals.push(Principals.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RBAC { + const message = { ...baseRBAC } as RBAC; + message.action = + object.action !== undefined && object.action !== null + ? rBAC_ActionFromJSON(object.action) + : 0; + message.principals = (object.principals ?? []).map((e: any) => + Principals.fromJSON(e) + ); + return message; + }, + + toJSON(message: RBAC): unknown { + const obj: any = {}; + message.action !== undefined && + (obj.action = rBAC_ActionToJSON(message.action)); + if (message.principals) { + obj.principals = message.principals.map((e) => + e ? Principals.toJSON(e) : undefined + ); + } else { + obj.principals = []; + } + return obj; + }, + + fromPartial, I>>(object: I): RBAC { + const message = { ...baseRBAC } as RBAC; + message.action = object.action ?? 
0; + message.principals = + object.principals?.map((e) => Principals.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(RBAC.$type, RBAC); + +const basePrincipals: object = { + $type: "yandex.cloud.apploadbalancer.v1.Principals", +}; + +export const Principals = { + $type: "yandex.cloud.apploadbalancer.v1.Principals" as const, + + encode( + message: Principals, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.andPrincipals) { + Principal.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Principals { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePrincipals } as Principals; + message.andPrincipals = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.andPrincipals.push(Principal.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Principals { + const message = { ...basePrincipals } as Principals; + message.andPrincipals = (object.andPrincipals ?? []).map((e: any) => + Principal.fromJSON(e) + ); + return message; + }, + + toJSON(message: Principals): unknown { + const obj: any = {}; + if (message.andPrincipals) { + obj.andPrincipals = message.andPrincipals.map((e) => + e ? 
Principal.toJSON(e) : undefined + ); + } else { + obj.andPrincipals = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Principals { + const message = { ...basePrincipals } as Principals; + message.andPrincipals = + object.andPrincipals?.map((e) => Principal.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Principals.$type, Principals); + +const basePrincipal: object = { + $type: "yandex.cloud.apploadbalancer.v1.Principal", +}; + +export const Principal = { + $type: "yandex.cloud.apploadbalancer.v1.Principal" as const, + + encode( + message: Principal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.header !== undefined) { + Principal_HeaderMatcher.encode( + message.header, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.remoteIp !== undefined) { + writer.uint32(18).string(message.remoteIp); + } + if (message.any !== undefined) { + writer.uint32(24).bool(message.any); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Principal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePrincipal } as Principal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.header = Principal_HeaderMatcher.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.remoteIp = reader.string(); + break; + case 3: + message.any = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Principal { + const message = { ...basePrincipal } as Principal; + message.header = + object.header !== undefined && object.header !== null + ? Principal_HeaderMatcher.fromJSON(object.header) + : undefined; + message.remoteIp = + object.remoteIp !== undefined && object.remoteIp !== null + ? 
String(object.remoteIp) + : undefined; + message.any = + object.any !== undefined && object.any !== null + ? Boolean(object.any) + : undefined; + return message; + }, + + toJSON(message: Principal): unknown { + const obj: any = {}; + message.header !== undefined && + (obj.header = message.header + ? Principal_HeaderMatcher.toJSON(message.header) + : undefined); + message.remoteIp !== undefined && (obj.remoteIp = message.remoteIp); + message.any !== undefined && (obj.any = message.any); + return obj; + }, + + fromPartial, I>>( + object: I + ): Principal { + const message = { ...basePrincipal } as Principal; + message.header = + object.header !== undefined && object.header !== null + ? Principal_HeaderMatcher.fromPartial(object.header) + : undefined; + message.remoteIp = object.remoteIp ?? undefined; + message.any = object.any ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Principal.$type, Principal); + +const basePrincipal_HeaderMatcher: object = { + $type: "yandex.cloud.apploadbalancer.v1.Principal.HeaderMatcher", + name: "", +}; + +export const Principal_HeaderMatcher = { + $type: "yandex.cloud.apploadbalancer.v1.Principal.HeaderMatcher" as const, + + encode( + message: Principal_HeaderMatcher, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.value !== undefined) { + StringMatch.encode(message.value, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Principal_HeaderMatcher { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePrincipal_HeaderMatcher, + } as Principal_HeaderMatcher; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.value = StringMatch.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Principal_HeaderMatcher { + const message = { + ...basePrincipal_HeaderMatcher, + } as Principal_HeaderMatcher; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.value = + object.value !== undefined && object.value !== null + ? StringMatch.fromJSON(object.value) + : undefined; + return message; + }, + + toJSON(message: Principal_HeaderMatcher): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.value !== undefined && + (obj.value = message.value + ? StringMatch.toJSON(message.value) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Principal_HeaderMatcher { + const message = { + ...basePrincipal_HeaderMatcher, + } as Principal_HeaderMatcher; + message.name = object.name ?? ""; + message.value = + object.value !== undefined && object.value !== null + ? 
StringMatch.fromPartial(object.value) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Principal_HeaderMatcher.$type, Principal_HeaderMatcher); + const baseHeaderModification: object = { $type: "yandex.cloud.apploadbalancer.v1.HeaderModification", name: "", diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host_service.ts index 3a60025c..227db79a 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host_service.ts @@ -14,14 +14,15 @@ import { ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; -import { FieldMask } from "../../../../google/protobuf/field_mask"; import { + RouteOptions, VirtualHost, Route, HeaderModification, HttpRoute, GrpcRoute, } from "../../../../yandex/cloud/apploadbalancer/v1/virtual_host"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; import { Operation } from "../../../../yandex/cloud/operation/operation"; export const protobufPackage = "yandex.cloud.apploadbalancer.v1"; @@ -121,6 +122,8 @@ export interface CreateVirtualHostRequest { * before responses are forwarded to clients. */ modifyResponseHeaders: HeaderModification[]; + /** Route options for the virtual host. */ + routeOptions?: RouteOptions; } export interface CreateVirtualHostMetadata { @@ -194,6 +197,8 @@ export interface UpdateVirtualHostRequest { * Existing list of modifications is completely replaced by the specified list. */ modifyResponseHeaders: HeaderModification[]; + /** New route options for the virtual host. */ + routeOptions?: RouteOptions; } export interface UpdateVirtualHostMetadata { @@ -286,6 +291,8 @@ export interface UpdateRouteRequest { http?: HttpRoute | undefined; /** New settings of the gRPC route. */ grpc?: GrpcRoute | undefined; + /** New route options for the route. 
*/ + routeOptions?: RouteOptions; } export interface UpdateRouteMetadata { @@ -605,6 +612,12 @@ export const CreateVirtualHostRequest = { for (const v of message.modifyResponseHeaders) { HeaderModification.encode(v!, writer.uint32(58).fork()).ldelim(); } + if (message.routeOptions !== undefined) { + RouteOptions.encode( + message.routeOptions, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -646,6 +659,9 @@ export const CreateVirtualHostRequest = { HeaderModification.decode(reader, reader.uint32()) ); break; + case 8: + message.routeOptions = RouteOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -674,6 +690,10 @@ export const CreateVirtualHostRequest = { message.modifyResponseHeaders = (object.modifyResponseHeaders ?? []).map( (e: any) => HeaderModification.fromJSON(e) ); + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromJSON(object.routeOptions) + : undefined; return message; }, @@ -706,6 +726,10 @@ export const CreateVirtualHostRequest = { } else { obj.modifyResponseHeaders = []; } + message.routeOptions !== undefined && + (obj.routeOptions = message.routeOptions + ? RouteOptions.toJSON(message.routeOptions) + : undefined); return obj; }, @@ -727,6 +751,10 @@ export const CreateVirtualHostRequest = { object.modifyResponseHeaders?.map((e) => HeaderModification.fromPartial(e) ) || []; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? 
RouteOptions.fromPartial(object.routeOptions) + : undefined; return message; }, }; @@ -860,6 +888,12 @@ export const UpdateVirtualHostRequest = { for (const v of message.modifyResponseHeaders) { HeaderModification.encode(v!, writer.uint32(66).fork()).ldelim(); } + if (message.routeOptions !== undefined) { + RouteOptions.encode( + message.routeOptions, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -904,6 +938,9 @@ export const UpdateVirtualHostRequest = { HeaderModification.decode(reader, reader.uint32()) ); break; + case 9: + message.routeOptions = RouteOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -936,6 +973,10 @@ export const UpdateVirtualHostRequest = { message.modifyResponseHeaders = (object.modifyResponseHeaders ?? []).map( (e: any) => HeaderModification.fromJSON(e) ); + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromJSON(object.routeOptions) + : undefined; return message; }, @@ -973,6 +1014,10 @@ export const UpdateVirtualHostRequest = { } else { obj.modifyResponseHeaders = []; } + message.routeOptions !== undefined && + (obj.routeOptions = message.routeOptions + ? RouteOptions.toJSON(message.routeOptions) + : undefined); return obj; }, @@ -998,6 +1043,10 @@ export const UpdateVirtualHostRequest = { object.modifyResponseHeaders?.map((e) => HeaderModification.fromPartial(e) ) || []; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? 
RouteOptions.fromPartial(object.routeOptions) + : undefined; return message; }, }; @@ -1486,6 +1535,12 @@ export const UpdateRouteRequest = { if (message.grpc !== undefined) { GrpcRoute.encode(message.grpc, writer.uint32(50).fork()).ldelim(); } + if (message.routeOptions !== undefined) { + RouteOptions.encode( + message.routeOptions, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1514,6 +1569,9 @@ export const UpdateRouteRequest = { case 6: message.grpc = GrpcRoute.decode(reader, reader.uint32()); break; + case 7: + message.routeOptions = RouteOptions.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1548,6 +1606,10 @@ export const UpdateRouteRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcRoute.fromJSON(object.grpc) : undefined; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? RouteOptions.fromJSON(object.routeOptions) + : undefined; return message; }, @@ -1566,6 +1628,10 @@ export const UpdateRouteRequest = { (obj.http = message.http ? HttpRoute.toJSON(message.http) : undefined); message.grpc !== undefined && (obj.grpc = message.grpc ? GrpcRoute.toJSON(message.grpc) : undefined); + message.routeOptions !== undefined && + (obj.routeOptions = message.routeOptions + ? RouteOptions.toJSON(message.routeOptions) + : undefined); return obj; }, @@ -1588,6 +1654,10 @@ export const UpdateRouteRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcRoute.fromPartial(object.grpc) : undefined; + message.routeOptions = + object.routeOptions !== undefined && object.routeOptions !== null + ? 
RouteOptions.fromPartial(object.routeOptions) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/backup/index.ts b/src/generated/yandex/cloud/backup/index.ts new file mode 100644 index 00000000..1b05d72e --- /dev/null +++ b/src/generated/yandex/cloud/backup/index.ts @@ -0,0 +1,7 @@ +export * as backup from './v1/backup' +export * as backup_service from './v1/backup_service' +export * as policy from './v1/policy' +export * as policy_service from './v1/policy_service' +export * as provider_service from './v1/provider_service' +export * as resource from './v1/resource' +export * as resource_service from './v1/resource_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/backup/v1/backup.ts b/src/generated/yandex/cloud/backup/v1/backup.ts new file mode 100644 index 00000000..f161738b --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/backup.ts @@ -0,0 +1,1773 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Format, + formatFromJSON, + formatToJSON, +} from "../../../../yandex/cloud/backup/v1/policy"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { StringValue } from "../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +/** Archive is a container that holds backups of Compute Cloud instance. */ +export interface Archive { + $type: "yandex.cloud.backup.v1.Archive"; + /** ID of the backup. */ + id: string; + /** Name of the backup. */ + name: string; + /** ID of the backup vault. */ + vaultId: string; + /** Archive attributes. */ + attributes?: Archive_ArchiveAttributes; + /** Archive size. */ + size: number; + /** Compressed data size. */ + compressedDataSize: number; + /** Data size. */ + dataSize: number; + /** Original data size. */ + originalDataSize: number; + /** Logical size. 
*/ + logicalSize: number; + format: Format; + createdAt?: Date; + updatedAt?: Date; + lastBackupCreatedAt?: Date; + lastSeenAt?: Date; + /** + * If this field is true, it means that any of encryption algorithm + * has been chosen. + */ + protectedByPassword: boolean; + encryptionAlgorithm: Archive_EncryptionAlgorithm; + actions: Archive_Action[]; + /** Backup plan ID. */ + backupPlanId: string; + /** Backup plan name. */ + backupPlanName: string; + /** Backup plan description. */ + description: string; + /** Display name, e.g. `INSTANCE_NAME - POLICY_NAME`. */ + displayName: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** If this field is true, it means that the archive is consistent. */ + consistent: boolean; + /** If this field is true, it means that the archive was deleted. */ + deleted: boolean; + /** Resource ID. */ + resourceId: string; +} + +/** + * Encryption Algorithm for underlying backups: + * `ENCRYPTION_ALGORITHM_UNSPECIFIED`, `NONE`, `AES128`, `AES192`, + * `AES256`. 
+ */ +export enum Archive_EncryptionAlgorithm { + ENCRYPTION_ALGORITHM_UNSPECIFIED = 0, + NONE = 1, + AES128 = 2, + AES192 = 3, + AES256 = 4, + UNRECOGNIZED = -1, +} + +export function archive_EncryptionAlgorithmFromJSON( + object: any +): Archive_EncryptionAlgorithm { + switch (object) { + case 0: + case "ENCRYPTION_ALGORITHM_UNSPECIFIED": + return Archive_EncryptionAlgorithm.ENCRYPTION_ALGORITHM_UNSPECIFIED; + case 1: + case "NONE": + return Archive_EncryptionAlgorithm.NONE; + case 2: + case "AES128": + return Archive_EncryptionAlgorithm.AES128; + case 3: + case "AES192": + return Archive_EncryptionAlgorithm.AES192; + case 4: + case "AES256": + return Archive_EncryptionAlgorithm.AES256; + case -1: + case "UNRECOGNIZED": + default: + return Archive_EncryptionAlgorithm.UNRECOGNIZED; + } +} + +export function archive_EncryptionAlgorithmToJSON( + object: Archive_EncryptionAlgorithm +): string { + switch (object) { + case Archive_EncryptionAlgorithm.ENCRYPTION_ALGORITHM_UNSPECIFIED: + return "ENCRYPTION_ALGORITHM_UNSPECIFIED"; + case Archive_EncryptionAlgorithm.NONE: + return "NONE"; + case Archive_EncryptionAlgorithm.AES128: + return "AES128"; + case Archive_EncryptionAlgorithm.AES192: + return "AES192"; + case Archive_EncryptionAlgorithm.AES256: + return "AES256"; + default: + return "UNKNOWN"; + } +} + +/** + * Action with archive backup: `ACTION_UNSPECIFIED`, `REFRESH`, + * `DELETE_BY_AGENT`. 
+ */ +export enum Archive_Action { + ACTION_UNSPECIFIED = 0, + REFRESH = 1, + DELETE_BY_AGENT = 2, + UNRECOGNIZED = -1, +} + +export function archive_ActionFromJSON(object: any): Archive_Action { + switch (object) { + case 0: + case "ACTION_UNSPECIFIED": + return Archive_Action.ACTION_UNSPECIFIED; + case 1: + case "REFRESH": + return Archive_Action.REFRESH; + case 2: + case "DELETE_BY_AGENT": + return Archive_Action.DELETE_BY_AGENT; + case -1: + case "UNRECOGNIZED": + default: + return Archive_Action.UNRECOGNIZED; + } +} + +export function archive_ActionToJSON(object: Archive_Action): string { + switch (object) { + case Archive_Action.ACTION_UNSPECIFIED: + return "ACTION_UNSPECIFIED"; + case Archive_Action.REFRESH: + return "REFRESH"; + case Archive_Action.DELETE_BY_AGENT: + return "DELETE_BY_AGENT"; + default: + return "UNKNOWN"; + } +} + +/** Archive attributes. */ +export interface Archive_ArchiveAttributes { + $type: "yandex.cloud.backup.v1.Archive.ArchiveAttributes"; + /** Archive attribute. Default value: `0`. */ + aaib: string; + /** URI of the backup archive. */ + uri: string; +} + +export interface Volume { + $type: "yandex.cloud.backup.v1.Volume"; + /** Free space in the volume. */ + freeSpace: number; + /** If this field is true, it means that the volume is bootable. */ + isBootable: boolean; + /** If this field is true, it means that the volume is a system volume. */ + isSystem: boolean; + /** Volume name. */ + name: string; + /** Volume size. */ + size: number; + /** Mount string ID. */ + mountStrid: string; +} + +export interface Disk { + $type: "yandex.cloud.backup.v1.Disk"; + /** Device model. */ + deviceModel: string; + /** Disk name. */ + name: string; + /** Disk size. */ + size: number; + volumes: Volume[]; +} + +export interface Backup { + $type: "yandex.cloud.backup.v1.Backup"; + /** ID of the backup. */ + id: string; + /** ID of the backup vault. */ + vaultId: string; + /** ID of the backup archive. 
*/ + archiveId: string; + createdAt?: Date; + lastSeenAt?: Date; + /** Backup size. */ + size: number; + /** Deduplicated backup size. */ + deduplicatedSize: number; + /** Backed up data size. */ + backedUpDataSize: number; + /** Original data size. */ + originalDataSize: number; + attributes?: Backup_BackupAttributes; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + disks: Disk[]; + type: Backup_Type; + /** If this field is true, it means that the backup was deleted. */ + deleted: boolean; + /** [Policy](/docs/backup/concepts/policy) ID. */ + policyId: string; + /** Resource ID. It identifies Compute Cloud instance in backup service. */ + resourceId: string; +} + +/** + * Backup type. + * For detailed information, please see [Backup types](/docs/backup/concepts/backup#types). + */ +export enum Backup_Type { + TYPE_UNSPECIFIED = 0, + FULL = 1, + INCREMENTAL = 2, + UNRECOGNIZED = -1, +} + +export function backup_TypeFromJSON(object: any): Backup_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return Backup_Type.TYPE_UNSPECIFIED; + case 1: + case "FULL": + return Backup_Type.FULL; + case 2: + case "INCREMENTAL": + return Backup_Type.INCREMENTAL; + case -1: + case "UNRECOGNIZED": + default: + return Backup_Type.UNRECOGNIZED; + } +} + +export function backup_TypeToJSON(object: Backup_Type): string { + switch (object) { + case Backup_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case Backup_Type.FULL: + return "FULL"; + case Backup_Type.INCREMENTAL: + return "INCREMENTAL"; + default: + return "UNKNOWN"; + } +} + +/** Backup attributes. */ +export interface Backup_BackupAttributes { + $type: "yandex.cloud.backup.v1.Backup.BackupAttributes"; + /** Backup stream name. */ + streamName: string; + /** URI of the backup archive. */ + uri: string; +} + +/** BackupFile represents a single unit of file or directory system inside the backup. 
*/ +export interface BackupFile { + $type: "yandex.cloud.backup.v1.BackupFile"; + /** ID of the item. Should be used as source ID in case of listing. */ + id: string; + /** Might be empty if this is root directory. */ + parentId?: string; + /** Type of the item. */ + type: BackupFile_Type; + /** Absolute path of the item. */ + fullPath: string; + /** Name of the directory / file. */ + name: string; + /** Size in bytes of the item. */ + size: number; + /** Actions that might be done on the object. */ + actions?: BackupFile_Actions; + modifiedAt?: Date; +} + +/** Type of the file. */ +export enum BackupFile_Type { + TYPE_UNSPECIFIED = 0, + TYPE_DIR = 1, + TYPE_FILE = 2, + UNRECOGNIZED = -1, +} + +export function backupFile_TypeFromJSON(object: any): BackupFile_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return BackupFile_Type.TYPE_UNSPECIFIED; + case 1: + case "TYPE_DIR": + return BackupFile_Type.TYPE_DIR; + case 2: + case "TYPE_FILE": + return BackupFile_Type.TYPE_FILE; + case -1: + case "UNRECOGNIZED": + default: + return BackupFile_Type.UNRECOGNIZED; + } +} + +export function backupFile_TypeToJSON(object: BackupFile_Type): string { + switch (object) { + case BackupFile_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case BackupFile_Type.TYPE_DIR: + return "TYPE_DIR"; + case BackupFile_Type.TYPE_FILE: + return "TYPE_FILE"; + default: + return "UNKNOWN"; + } +} + +export interface BackupFile_Actions { + $type: "yandex.cloud.backup.v1.BackupFile.Actions"; + /** Allows to send request to restore item to disk */ + restoreToDisk: boolean; + /** Allows to move to location by id. 
*/ + goToLocation: boolean; +} + +const baseArchive: object = { + $type: "yandex.cloud.backup.v1.Archive", + id: "", + name: "", + vaultId: "", + size: 0, + compressedDataSize: 0, + dataSize: 0, + originalDataSize: 0, + logicalSize: 0, + format: 0, + protectedByPassword: false, + encryptionAlgorithm: 0, + actions: 0, + backupPlanId: "", + backupPlanName: "", + description: "", + displayName: "", + computeInstanceId: "", + consistent: false, + deleted: false, + resourceId: "", +}; + +export const Archive = { + $type: "yandex.cloud.backup.v1.Archive" as const, + + encode( + message: Archive, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.vaultId !== "") { + writer.uint32(26).string(message.vaultId); + } + if (message.attributes !== undefined) { + Archive_ArchiveAttributes.encode( + message.attributes, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.size !== 0) { + writer.uint32(40).int64(message.size); + } + if (message.compressedDataSize !== 0) { + writer.uint32(48).int64(message.compressedDataSize); + } + if (message.dataSize !== 0) { + writer.uint32(56).int64(message.dataSize); + } + if (message.originalDataSize !== 0) { + writer.uint32(64).int64(message.originalDataSize); + } + if (message.logicalSize !== 0) { + writer.uint32(72).int64(message.logicalSize); + } + if (message.format !== 0) { + writer.uint32(80).int32(message.format); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(90).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(98).fork() + ).ldelim(); + } + if (message.lastBackupCreatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.lastBackupCreatedAt), + writer.uint32(106).fork() + ).ldelim(); + } + if 
(message.lastSeenAt !== undefined) { + Timestamp.encode( + toTimestamp(message.lastSeenAt), + writer.uint32(114).fork() + ).ldelim(); + } + if (message.protectedByPassword === true) { + writer.uint32(120).bool(message.protectedByPassword); + } + if (message.encryptionAlgorithm !== 0) { + writer.uint32(128).int32(message.encryptionAlgorithm); + } + writer.uint32(162).fork(); + for (const v of message.actions) { + writer.int32(v); + } + writer.ldelim(); + if (message.backupPlanId !== "") { + writer.uint32(178).string(message.backupPlanId); + } + if (message.backupPlanName !== "") { + writer.uint32(186).string(message.backupPlanName); + } + if (message.description !== "") { + writer.uint32(194).string(message.description); + } + if (message.displayName !== "") { + writer.uint32(202).string(message.displayName); + } + if (message.computeInstanceId !== "") { + writer.uint32(210).string(message.computeInstanceId); + } + if (message.consistent === true) { + writer.uint32(216).bool(message.consistent); + } + if (message.deleted === true) { + writer.uint32(240).bool(message.deleted); + } + if (message.resourceId !== "") { + writer.uint32(250).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Archive { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseArchive } as Archive; + message.actions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.vaultId = reader.string(); + break; + case 4: + message.attributes = Archive_ArchiveAttributes.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.size = longToNumber(reader.int64() as Long); + break; + case 6: + message.compressedDataSize = longToNumber(reader.int64() as Long); + break; + case 7: + message.dataSize = longToNumber(reader.int64() as Long); + break; + case 8: + message.originalDataSize = longToNumber(reader.int64() as Long); + break; + case 9: + message.logicalSize = longToNumber(reader.int64() as Long); + break; + case 10: + message.format = reader.int32() as any; + break; + case 11: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 12: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 13: + message.lastBackupCreatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 14: + message.lastSeenAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 15: + message.protectedByPassword = reader.bool(); + break; + case 16: + message.encryptionAlgorithm = reader.int32() as any; + break; + case 20: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.actions.push(reader.int32() as any); + } + } else { + message.actions.push(reader.int32() as any); + } + break; + case 22: + message.backupPlanId = reader.string(); + break; + case 23: + message.backupPlanName = reader.string(); + break; + case 24: + message.description = reader.string(); + break; + case 25: + message.displayName = reader.string(); + break; + case 26: + 
message.computeInstanceId = reader.string(); + break; + case 27: + message.consistent = reader.bool(); + break; + case 30: + message.deleted = reader.bool(); + break; + case 31: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Archive { + const message = { ...baseArchive } as Archive; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.vaultId = + object.vaultId !== undefined && object.vaultId !== null + ? String(object.vaultId) + : ""; + message.attributes = + object.attributes !== undefined && object.attributes !== null + ? Archive_ArchiveAttributes.fromJSON(object.attributes) + : undefined; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.compressedDataSize = + object.compressedDataSize !== undefined && + object.compressedDataSize !== null + ? Number(object.compressedDataSize) + : 0; + message.dataSize = + object.dataSize !== undefined && object.dataSize !== null + ? Number(object.dataSize) + : 0; + message.originalDataSize = + object.originalDataSize !== undefined && object.originalDataSize !== null + ? Number(object.originalDataSize) + : 0; + message.logicalSize = + object.logicalSize !== undefined && object.logicalSize !== null + ? Number(object.logicalSize) + : 0; + message.format = + object.format !== undefined && object.format !== null + ? formatFromJSON(object.format) + : 0; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? 
fromJsonTimestamp(object.updatedAt) + : undefined; + message.lastBackupCreatedAt = + object.lastBackupCreatedAt !== undefined && + object.lastBackupCreatedAt !== null + ? fromJsonTimestamp(object.lastBackupCreatedAt) + : undefined; + message.lastSeenAt = + object.lastSeenAt !== undefined && object.lastSeenAt !== null + ? fromJsonTimestamp(object.lastSeenAt) + : undefined; + message.protectedByPassword = + object.protectedByPassword !== undefined && + object.protectedByPassword !== null + ? Boolean(object.protectedByPassword) + : false; + message.encryptionAlgorithm = + object.encryptionAlgorithm !== undefined && + object.encryptionAlgorithm !== null + ? archive_EncryptionAlgorithmFromJSON(object.encryptionAlgorithm) + : 0; + message.actions = (object.actions ?? []).map((e: any) => + archive_ActionFromJSON(e) + ); + message.backupPlanId = + object.backupPlanId !== undefined && object.backupPlanId !== null + ? String(object.backupPlanId) + : ""; + message.backupPlanName = + object.backupPlanName !== undefined && object.backupPlanName !== null + ? String(object.backupPlanName) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.displayName = + object.displayName !== undefined && object.displayName !== null + ? String(object.displayName) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.consistent = + object.consistent !== undefined && object.consistent !== null + ? Boolean(object.consistent) + : false; + message.deleted = + object.deleted !== undefined && object.deleted !== null + ? Boolean(object.deleted) + : false; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: Archive): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.name !== undefined && (obj.name = message.name); + message.vaultId !== undefined && (obj.vaultId = message.vaultId); + message.attributes !== undefined && + (obj.attributes = message.attributes + ? Archive_ArchiveAttributes.toJSON(message.attributes) + : undefined); + message.size !== undefined && (obj.size = Math.round(message.size)); + message.compressedDataSize !== undefined && + (obj.compressedDataSize = Math.round(message.compressedDataSize)); + message.dataSize !== undefined && + (obj.dataSize = Math.round(message.dataSize)); + message.originalDataSize !== undefined && + (obj.originalDataSize = Math.round(message.originalDataSize)); + message.logicalSize !== undefined && + (obj.logicalSize = Math.round(message.logicalSize)); + message.format !== undefined && (obj.format = formatToJSON(message.format)); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.lastBackupCreatedAt !== undefined && + (obj.lastBackupCreatedAt = message.lastBackupCreatedAt.toISOString()); + message.lastSeenAt !== undefined && + (obj.lastSeenAt = message.lastSeenAt.toISOString()); + message.protectedByPassword !== undefined && + (obj.protectedByPassword = message.protectedByPassword); + message.encryptionAlgorithm !== undefined && + (obj.encryptionAlgorithm = archive_EncryptionAlgorithmToJSON( + message.encryptionAlgorithm + )); + if (message.actions) { + obj.actions = message.actions.map((e) => archive_ActionToJSON(e)); + } else { + obj.actions = []; + } + message.backupPlanId !== undefined && + (obj.backupPlanId = message.backupPlanId); + message.backupPlanName !== undefined && + (obj.backupPlanName = message.backupPlanName); + message.description !== undefined && + 
(obj.description = message.description); + message.displayName !== undefined && + (obj.displayName = message.displayName); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.consistent !== undefined && (obj.consistent = message.consistent); + message.deleted !== undefined && (obj.deleted = message.deleted); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>(object: I): Archive { + const message = { ...baseArchive } as Archive; + message.id = object.id ?? ""; + message.name = object.name ?? ""; + message.vaultId = object.vaultId ?? ""; + message.attributes = + object.attributes !== undefined && object.attributes !== null + ? Archive_ArchiveAttributes.fromPartial(object.attributes) + : undefined; + message.size = object.size ?? 0; + message.compressedDataSize = object.compressedDataSize ?? 0; + message.dataSize = object.dataSize ?? 0; + message.originalDataSize = object.originalDataSize ?? 0; + message.logicalSize = object.logicalSize ?? 0; + message.format = object.format ?? 0; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.lastBackupCreatedAt = object.lastBackupCreatedAt ?? undefined; + message.lastSeenAt = object.lastSeenAt ?? undefined; + message.protectedByPassword = object.protectedByPassword ?? false; + message.encryptionAlgorithm = object.encryptionAlgorithm ?? 0; + message.actions = object.actions?.map((e) => e) || []; + message.backupPlanId = object.backupPlanId ?? ""; + message.backupPlanName = object.backupPlanName ?? ""; + message.description = object.description ?? ""; + message.displayName = object.displayName ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.consistent = object.consistent ?? false; + message.deleted = object.deleted ?? false; + message.resourceId = object.resourceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Archive.$type, Archive); + +const baseArchive_ArchiveAttributes: object = { + $type: "yandex.cloud.backup.v1.Archive.ArchiveAttributes", + aaib: "", + uri: "", +}; + +export const Archive_ArchiveAttributes = { + $type: "yandex.cloud.backup.v1.Archive.ArchiveAttributes" as const, + + encode( + message: Archive_ArchiveAttributes, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.aaib !== "") { + writer.uint32(10).string(message.aaib); + } + if (message.uri !== "") { + writer.uint32(18).string(message.uri); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Archive_ArchiveAttributes { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseArchive_ArchiveAttributes, + } as Archive_ArchiveAttributes; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.aaib = reader.string(); + break; + case 2: + message.uri = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Archive_ArchiveAttributes { + const message = { + ...baseArchive_ArchiveAttributes, + } as Archive_ArchiveAttributes; + message.aaib = + object.aaib !== undefined && object.aaib !== null + ? String(object.aaib) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + return message; + }, + + toJSON(message: Archive_ArchiveAttributes): unknown { + const obj: any = {}; + message.aaib !== undefined && (obj.aaib = message.aaib); + message.uri !== undefined && (obj.uri = message.uri); + return obj; + }, + + fromPartial, I>>( + object: I + ): Archive_ArchiveAttributes { + const message = { + ...baseArchive_ArchiveAttributes, + } as Archive_ArchiveAttributes; + message.aaib = object.aaib ?? 
"";
    message.uri = object.uri ?? "";
    return message;
  },
};

messageTypeRegistry.set(
  Archive_ArchiveAttributes.$type,
  Archive_ArchiveAttributes
);

const baseVolume: object = {
  $type: "yandex.cloud.backup.v1.Volume",
  freeSpace: 0,
  isBootable: false,
  isSystem: false,
  name: "",
  size: 0,
  mountStrid: "",
};

/** Codec for the `yandex.cloud.backup.v1.Volume` message. */
export const Volume = {
  $type: "yandex.cloud.backup.v1.Volume" as const,

  /** Serializes the message to protobuf wire format. */
  encode(
    message: Volume,
    writer: _m0.Writer = _m0.Writer.create()
  ): _m0.Writer {
    if (message.freeSpace !== 0) {
      writer.uint32(8).int64(message.freeSpace);
    }
    if (message.isBootable === true) {
      writer.uint32(16).bool(message.isBootable);
    }
    if (message.isSystem === true) {
      writer.uint32(24).bool(message.isSystem);
    }
    if (message.name !== "") {
      writer.uint32(34).string(message.name);
    }
    if (message.size !== 0) {
      writer.uint32(40).int64(message.size);
    }
    if (message.mountStrid !== "") {
      writer.uint32(50).string(message.mountStrid);
    }
    return writer;
  },

  /** Deserializes the message from protobuf wire format; unknown fields are skipped. */
  decode(input: _m0.Reader | Uint8Array, length?: number): Volume {
    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
    let end = length === undefined ? reader.len : reader.pos + length;
    const message = { ...baseVolume } as Volume;
    while (reader.pos < end) {
      const tag = reader.uint32();
      switch (tag >>> 3) {
        case 1:
          message.freeSpace = longToNumber(reader.int64() as Long);
          break;
        case 2:
          message.isBootable = reader.bool();
          break;
        case 3:
          message.isSystem = reader.bool();
          break;
        case 4:
          message.name = reader.string();
          break;
        case 5:
          message.size = longToNumber(reader.int64() as Long);
          break;
        case 6:
          message.mountStrid = reader.string();
          break;
        default:
          reader.skipType(tag & 7);
          break;
      }
    }
    return message;
  },

  /** Builds the message from a plain JSON object, coercing scalars and defaulting absent fields. */
  fromJSON(object: any): Volume {
    const message = { ...baseVolume } as Volume;
    message.freeSpace =
      object.freeSpace !== undefined && object.freeSpace !== null
        ? Number(object.freeSpace)
        : 0;
    message.isBootable =
      object.isBootable !== undefined && object.isBootable !== null
        ? Boolean(object.isBootable)
        : false;
    message.isSystem =
      object.isSystem !== undefined && object.isSystem !== null
        ? Boolean(object.isSystem)
        : false;
    message.name =
      object.name !== undefined && object.name !== null
        ? String(object.name)
        : "";
    message.size =
      object.size !== undefined && object.size !== null
        ? Number(object.size)
        : 0;
    message.mountStrid =
      object.mountStrid !== undefined && object.mountStrid !== null
        ? String(object.mountStrid)
        : "";
    return message;
  },

  /** Converts the message to a plain JSON object. */
  toJSON(message: Volume): unknown {
    const obj: any = {};
    message.freeSpace !== undefined &&
      (obj.freeSpace = Math.round(message.freeSpace));
    message.isBootable !== undefined && (obj.isBootable = message.isBootable);
    message.isSystem !== undefined && (obj.isSystem = message.isSystem);
    message.name !== undefined && (obj.name = message.name);
    message.size !== undefined && (obj.size = Math.round(message.size));
    message.mountStrid !== undefined && (obj.mountStrid = message.mountStrid);
    return obj;
  },

  /** Builds the message from a deep-partial literal, filling proto3 defaults. */
  fromPartial<I extends Exact<DeepPartial<Volume>, I>>(object: I): Volume {
    const message = { ...baseVolume } as Volume;
    message.freeSpace = object.freeSpace ?? 0;
    message.isBootable = object.isBootable ?? false;
    message.isSystem = object.isSystem ?? false;
    message.name = object.name ?? "";
    message.size = object.size ?? 0;
    message.mountStrid = object.mountStrid ?? "";
    return message;
  },
};

messageTypeRegistry.set(Volume.$type, Volume);

const baseDisk: object = {
  $type: "yandex.cloud.backup.v1.Disk",
  deviceModel: "",
  name: "",
  size: 0,
};

/** Codec for the `yandex.cloud.backup.v1.Disk` message. */
export const Disk = {
  $type: "yandex.cloud.backup.v1.Disk" as const,

  /** Serializes the message to protobuf wire format. */
  encode(message: Disk, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
    if (message.deviceModel !== "") {
      writer.uint32(10).string(message.deviceModel);
    }
    if (message.name !== "") {
      writer.uint32(18).string(message.name);
    }
    if (message.size !== 0) {
      writer.uint32(24).int64(message.size);
    }
    for (const v of message.volumes) {
      Volume.encode(v!, writer.uint32(34).fork()).ldelim();
    }
    return writer;
  },

  /** Deserializes the message from protobuf wire format; unknown fields are skipped. */
  decode(input: _m0.Reader | Uint8Array, length?: number): Disk {
    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
    let end = length === undefined ? reader.len : reader.pos + length;
    const message = { ...baseDisk } as Disk;
    message.volumes = [];
    while (reader.pos < end) {
      const tag = reader.uint32();
      switch (tag >>> 3) {
        case 1:
          message.deviceModel = reader.string();
          break;
        case 2:
          message.name = reader.string();
          break;
        case 3:
          message.size = longToNumber(reader.int64() as Long);
          break;
        case 4:
          message.volumes.push(Volume.decode(reader, reader.uint32()));
          break;
        default:
          reader.skipType(tag & 7);
          break;
      }
    }
    return message;
  },

  /** Builds the message from a plain JSON object, coercing scalars and defaulting absent fields. */
  fromJSON(object: any): Disk {
    const message = { ...baseDisk } as Disk;
    message.deviceModel =
      object.deviceModel !== undefined && object.deviceModel !== null
        ? String(object.deviceModel)
        : "";
    message.name =
      object.name !== undefined && object.name !== null
        ? String(object.name)
        : "";
    message.size =
      object.size !== undefined && object.size !== null
        ? Number(object.size)
        : 0;
    message.volumes = (object.volumes ??
[]).map((e: any) => + Volume.fromJSON(e) + ); + return message; + }, + + toJSON(message: Disk): unknown { + const obj: any = {}; + message.deviceModel !== undefined && + (obj.deviceModel = message.deviceModel); + message.name !== undefined && (obj.name = message.name); + message.size !== undefined && (obj.size = Math.round(message.size)); + if (message.volumes) { + obj.volumes = message.volumes.map((e) => + e ? Volume.toJSON(e) : undefined + ); + } else { + obj.volumes = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Disk { + const message = { ...baseDisk } as Disk; + message.deviceModel = object.deviceModel ?? ""; + message.name = object.name ?? ""; + message.size = object.size ?? 0; + message.volumes = object.volumes?.map((e) => Volume.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Disk.$type, Disk); + +const baseBackup: object = { + $type: "yandex.cloud.backup.v1.Backup", + id: "", + vaultId: "", + archiveId: "", + size: 0, + deduplicatedSize: 0, + backedUpDataSize: 0, + originalDataSize: 0, + computeInstanceId: "", + type: 0, + deleted: false, + policyId: "", + resourceId: "", +}; + +export const Backup = { + $type: "yandex.cloud.backup.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.vaultId !== "") { + writer.uint32(18).string(message.vaultId); + } + if (message.archiveId !== "") { + writer.uint32(26).string(message.archiveId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.lastSeenAt !== undefined) { + Timestamp.encode( + toTimestamp(message.lastSeenAt), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } + if (message.deduplicatedSize !== 0) { + writer.uint32(56).int64(message.deduplicatedSize); 
+ } + if (message.backedUpDataSize !== 0) { + writer.uint32(64).int64(message.backedUpDataSize); + } + if (message.originalDataSize !== 0) { + writer.uint32(72).int64(message.originalDataSize); + } + if (message.attributes !== undefined) { + Backup_BackupAttributes.encode( + message.attributes, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.computeInstanceId !== "") { + writer.uint32(90).string(message.computeInstanceId); + } + for (const v of message.disks) { + Disk.encode(v!, writer.uint32(114).fork()).ldelim(); + } + if (message.type !== 0) { + writer.uint32(120).int32(message.type); + } + if (message.deleted === true) { + writer.uint32(168).bool(message.deleted); + } + if (message.policyId !== "") { + writer.uint32(178).string(message.policyId); + } + if (message.resourceId !== "") { + writer.uint32(186).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + message.disks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.vaultId = reader.string(); + break; + case 3: + message.archiveId = reader.string(); + break; + case 4: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.lastSeenAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; + case 7: + message.deduplicatedSize = longToNumber(reader.int64() as Long); + break; + case 8: + message.backedUpDataSize = longToNumber(reader.int64() as Long); + break; + case 9: + message.originalDataSize = longToNumber(reader.int64() as Long); + break; + case 10: + message.attributes = Backup_BackupAttributes.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.computeInstanceId = reader.string(); + break; + case 14: + message.disks.push(Disk.decode(reader, reader.uint32())); + break; + case 15: + message.type = reader.int32() as any; + break; + case 21: + message.deleted = reader.bool(); + break; + case 22: + message.policyId = reader.string(); + break; + case 23: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.vaultId = + object.vaultId !== undefined && object.vaultId !== null + ? String(object.vaultId) + : ""; + message.archiveId = + object.archiveId !== undefined && object.archiveId !== null + ? String(object.archiveId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
fromJsonTimestamp(object.createdAt) + : undefined; + message.lastSeenAt = + object.lastSeenAt !== undefined && object.lastSeenAt !== null + ? fromJsonTimestamp(object.lastSeenAt) + : undefined; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.deduplicatedSize = + object.deduplicatedSize !== undefined && object.deduplicatedSize !== null + ? Number(object.deduplicatedSize) + : 0; + message.backedUpDataSize = + object.backedUpDataSize !== undefined && object.backedUpDataSize !== null + ? Number(object.backedUpDataSize) + : 0; + message.originalDataSize = + object.originalDataSize !== undefined && object.originalDataSize !== null + ? Number(object.originalDataSize) + : 0; + message.attributes = + object.attributes !== undefined && object.attributes !== null + ? Backup_BackupAttributes.fromJSON(object.attributes) + : undefined; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.disks = (object.disks ?? []).map((e: any) => Disk.fromJSON(e)); + message.type = + object.type !== undefined && object.type !== null + ? backup_TypeFromJSON(object.type) + : 0; + message.deleted = + object.deleted !== undefined && object.deleted !== null + ? Boolean(object.deleted) + : false; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.vaultId !== undefined && (obj.vaultId = message.vaultId); + message.archiveId !== undefined && (obj.archiveId = message.archiveId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.lastSeenAt !== undefined && + (obj.lastSeenAt = message.lastSeenAt.toISOString()); + message.size !== undefined && (obj.size = Math.round(message.size)); + message.deduplicatedSize !== undefined && + (obj.deduplicatedSize = Math.round(message.deduplicatedSize)); + message.backedUpDataSize !== undefined && + (obj.backedUpDataSize = Math.round(message.backedUpDataSize)); + message.originalDataSize !== undefined && + (obj.originalDataSize = Math.round(message.originalDataSize)); + message.attributes !== undefined && + (obj.attributes = message.attributes + ? Backup_BackupAttributes.toJSON(message.attributes) + : undefined); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + if (message.disks) { + obj.disks = message.disks.map((e) => (e ? Disk.toJSON(e) : undefined)); + } else { + obj.disks = []; + } + message.type !== undefined && (obj.type = backup_TypeToJSON(message.type)); + message.deleted !== undefined && (obj.deleted = message.deleted); + message.policyId !== undefined && (obj.policyId = message.policyId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.vaultId = object.vaultId ?? ""; + message.archiveId = object.archiveId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.lastSeenAt = object.lastSeenAt ?? undefined; + message.size = object.size ?? 0; + message.deduplicatedSize = object.deduplicatedSize ?? 
0; + message.backedUpDataSize = object.backedUpDataSize ?? 0; + message.originalDataSize = object.originalDataSize ?? 0; + message.attributes = + object.attributes !== undefined && object.attributes !== null + ? Backup_BackupAttributes.fromPartial(object.attributes) + : undefined; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.disks = object.disks?.map((e) => Disk.fromPartial(e)) || []; + message.type = object.type ?? 0; + message.deleted = object.deleted ?? false; + message.policyId = object.policyId ?? ""; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +const baseBackup_BackupAttributes: object = { + $type: "yandex.cloud.backup.v1.Backup.BackupAttributes", + streamName: "", + uri: "", +}; + +export const Backup_BackupAttributes = { + $type: "yandex.cloud.backup.v1.Backup.BackupAttributes" as const, + + encode( + message: Backup_BackupAttributes, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.streamName !== "") { + writer.uint32(10).string(message.streamName); + } + if (message.uri !== "") { + writer.uint32(18).string(message.uri); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Backup_BackupAttributes { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseBackup_BackupAttributes, + } as Backup_BackupAttributes; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.streamName = reader.string(); + break; + case 2: + message.uri = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup_BackupAttributes { + const message = { + ...baseBackup_BackupAttributes, + } as Backup_BackupAttributes; + message.streamName = + object.streamName !== undefined && object.streamName !== null + ? String(object.streamName) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + return message; + }, + + toJSON(message: Backup_BackupAttributes): unknown { + const obj: any = {}; + message.streamName !== undefined && (obj.streamName = message.streamName); + message.uri !== undefined && (obj.uri = message.uri); + return obj; + }, + + fromPartial, I>>( + object: I + ): Backup_BackupAttributes { + const message = { + ...baseBackup_BackupAttributes, + } as Backup_BackupAttributes; + message.streamName = object.streamName ?? ""; + message.uri = object.uri ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Backup_BackupAttributes.$type, Backup_BackupAttributes); + +const baseBackupFile: object = { + $type: "yandex.cloud.backup.v1.BackupFile", + id: "", + type: 0, + fullPath: "", + name: "", + size: 0, +}; + +export const BackupFile = { + $type: "yandex.cloud.backup.v1.BackupFile" as const, + + encode( + message: BackupFile, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.parentId !== undefined) { + StringValue.encode( + { $type: "google.protobuf.StringValue", value: message.parentId! 
}, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.type !== 0) { + writer.uint32(24).int32(message.type); + } + if (message.fullPath !== "") { + writer.uint32(34).string(message.fullPath); + } + if (message.name !== "") { + writer.uint32(42).string(message.name); + } + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } + if (message.actions !== undefined) { + BackupFile_Actions.encode( + message.actions, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.modifiedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.modifiedAt), + writer.uint32(66).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BackupFile { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupFile } as BackupFile; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.parentId = StringValue.decode(reader, reader.uint32()).value; + break; + case 3: + message.type = reader.int32() as any; + break; + case 4: + message.fullPath = reader.string(); + break; + case 5: + message.name = reader.string(); + break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; + case 7: + message.actions = BackupFile_Actions.decode(reader, reader.uint32()); + break; + case 8: + message.modifiedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupFile { + const message = { ...baseBackupFile } as BackupFile; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.parentId = + object.parentId !== undefined && object.parentId !== null + ? 
String(object.parentId) + : undefined; + message.type = + object.type !== undefined && object.type !== null + ? backupFile_TypeFromJSON(object.type) + : 0; + message.fullPath = + object.fullPath !== undefined && object.fullPath !== null + ? String(object.fullPath) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.actions = + object.actions !== undefined && object.actions !== null + ? BackupFile_Actions.fromJSON(object.actions) + : undefined; + message.modifiedAt = + object.modifiedAt !== undefined && object.modifiedAt !== null + ? fromJsonTimestamp(object.modifiedAt) + : undefined; + return message; + }, + + toJSON(message: BackupFile): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.parentId !== undefined && (obj.parentId = message.parentId); + message.type !== undefined && + (obj.type = backupFile_TypeToJSON(message.type)); + message.fullPath !== undefined && (obj.fullPath = message.fullPath); + message.name !== undefined && (obj.name = message.name); + message.size !== undefined && (obj.size = Math.round(message.size)); + message.actions !== undefined && + (obj.actions = message.actions + ? BackupFile_Actions.toJSON(message.actions) + : undefined); + message.modifiedAt !== undefined && + (obj.modifiedAt = message.modifiedAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupFile { + const message = { ...baseBackupFile } as BackupFile; + message.id = object.id ?? ""; + message.parentId = object.parentId ?? undefined; + message.type = object.type ?? 0; + message.fullPath = object.fullPath ?? ""; + message.name = object.name ?? ""; + message.size = object.size ?? 0; + message.actions = + object.actions !== undefined && object.actions !== null + ? 
BackupFile_Actions.fromPartial(object.actions) + : undefined; + message.modifiedAt = object.modifiedAt ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(BackupFile.$type, BackupFile); + +const baseBackupFile_Actions: object = { + $type: "yandex.cloud.backup.v1.BackupFile.Actions", + restoreToDisk: false, + goToLocation: false, +}; + +export const BackupFile_Actions = { + $type: "yandex.cloud.backup.v1.BackupFile.Actions" as const, + + encode( + message: BackupFile_Actions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.restoreToDisk === true) { + writer.uint32(8).bool(message.restoreToDisk); + } + if (message.goToLocation === true) { + writer.uint32(16).bool(message.goToLocation); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BackupFile_Actions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupFile_Actions } as BackupFile_Actions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.restoreToDisk = reader.bool(); + break; + case 2: + message.goToLocation = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupFile_Actions { + const message = { ...baseBackupFile_Actions } as BackupFile_Actions; + message.restoreToDisk = + object.restoreToDisk !== undefined && object.restoreToDisk !== null + ? Boolean(object.restoreToDisk) + : false; + message.goToLocation = + object.goToLocation !== undefined && object.goToLocation !== null + ? 
Boolean(object.goToLocation) + : false; + return message; + }, + + toJSON(message: BackupFile_Actions): unknown { + const obj: any = {}; + message.restoreToDisk !== undefined && + (obj.restoreToDisk = message.restoreToDisk); + message.goToLocation !== undefined && + (obj.goToLocation = message.goToLocation); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupFile_Actions { + const message = { ...baseBackupFile_Actions } as BackupFile_Actions; + message.restoreToDisk = object.restoreToDisk ?? false; + message.goToLocation = object.goToLocation ?? false; + return message; + }, +}; + +messageTypeRegistry.set(BackupFile_Actions.$type, BackupFile_Actions); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/backup_service.ts b/src/generated/yandex/cloud/backup/v1/backup_service.ts new file mode 100644 index 00000000..24af93c0 --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/backup_service.ts @@ -0,0 +1,2145 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + Archive, + Backup, + BackupFile, +} from "../../../../yandex/cloud/backup/v1/backup"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +export interface ListArchivesRequest { + $type: "yandex.cloud.backup.v1.ListArchivesRequest"; + /** List of archives in specified folder. 
*/ + folderId: string | undefined; + /** List of archives of the specified Compute Cloud instance. */ + computeInstanceId: string | undefined; +} + +export interface ListArchivesResponse { + $type: "yandex.cloud.backup.v1.ListArchivesResponse"; + archives: Archive[]; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.backup.v1.ListBackupsRequest"; + /** List backups that belongs to specific Compute Cloud instance. */ + computeInstanceId: string | undefined; + /** List backups that belongs to specific archive of specific folder. */ + archive?: ListBackupsRequest_ArchiveParameters | undefined; + /** List backups that belongs to specific folder. */ + folderId: string | undefined; + /** List backups that belongs to specific instance and policy at the same time. */ + instancePolicy?: ListBackupsRequest_InstancePolicy | undefined; + /** List backups by specific resource ID. */ + resourceId: string | undefined; + /** List backups by specific policy ID. */ + policyId: string | undefined; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "createdAt desc" if omitted. + */ + orderBy: string; + /** + * Filter list by various parameters. + * Supported parameters are: + * * created_at + * + * Supported logic operators: + * * AND + */ + filter: string; +} + +export interface ListBackupsRequest_ArchiveParameters { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.ArchiveParameters"; + /** Archive ID. */ + archiveId: string; + /** Folder ID. */ + folderId: string; +} + +export interface ListBackupsRequest_InstancePolicy { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.InstancePolicy"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Policy ID. 
*/ + policyId: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.backup.v1.ListBackupsResponse"; + backups: Backup[]; +} + +export interface ListFilesRequest { + $type: "yandex.cloud.backup.v1.ListFilesRequest"; + /** Folder ID. */ + folderId: string; + /** Backup ID. */ + backupId: string; + /** Empty source will list disks of the backup. */ + sourceId: string; +} + +export interface ListFilesResponse { + $type: "yandex.cloud.backup.v1.ListFilesResponse"; + files: BackupFile[]; +} + +export interface GetBackupRequest { + $type: "yandex.cloud.backup.v1.GetBackupRequest"; + /** Backup ID. */ + backupId: string; + /** Folder ID. */ + folderId: string; +} + +export interface StartRecoveryRequest { + $type: "yandex.cloud.backup.v1.StartRecoveryRequest"; + /** Destination Compute Cloud instance ID to which backup should be applied. */ + computeInstanceId: string; + /** Backup ID that will be applied to destination Compute Cloud instance. */ + backupId: string; +} + +export interface StartRecoveryMetadata { + $type: "yandex.cloud.backup.v1.StartRecoveryMetadata"; + /** Progress of the backup process. */ + progressPercentage: number; + /** Source Backup ID that will be applied. */ + srcBackupId: string; + /** Destination Compute Cloud instance ID to which backup will be applied. */ + dstComputeInstanceId: string; +} + +export interface TargetPathOriginal { + $type: "yandex.cloud.backup.v1.TargetPathOriginal"; +} + +export interface TargetPathCustom { + $type: "yandex.cloud.backup.v1.TargetPathCustom"; + /** Custom folder for file recovery. */ + path: string; +} + +export interface FilesRecoveryOptions { + $type: "yandex.cloud.backup.v1.FilesRecoveryOptions"; + /** Overwrite options declares the behavior for files that already exists on the file system. */ + overwrite: FilesRecoveryOptions_Overwrite; + /** specifies whether the recovery plan is able to reboot host if needed. */ + rebootIfNeeded: boolean; + /** Keep original paths of files. 
*/ + original?: TargetPathOriginal | undefined; + /** Set custom folder for file recovery. */ + custom?: TargetPathCustom | undefined; +} + +export enum FilesRecoveryOptions_Overwrite { + /** OVERWRITE_UNSPECIFIED - Unspecified value treated as Overwrite all */ + OVERWRITE_UNSPECIFIED = 0, + /** OVERWRITE_ALL - All overwrites all existing files by recovered ones. */ + OVERWRITE_ALL = 1, + /** OVERWRITE_OLDER - Older overwrites older files only. */ + OVERWRITE_OLDER = 2, + /** OVERWRITE_NONE - None does not overwrites files at all. */ + OVERWRITE_NONE = 3, + UNRECOGNIZED = -1, +} + +export function filesRecoveryOptions_OverwriteFromJSON( + object: any +): FilesRecoveryOptions_Overwrite { + switch (object) { + case 0: + case "OVERWRITE_UNSPECIFIED": + return FilesRecoveryOptions_Overwrite.OVERWRITE_UNSPECIFIED; + case 1: + case "OVERWRITE_ALL": + return FilesRecoveryOptions_Overwrite.OVERWRITE_ALL; + case 2: + case "OVERWRITE_OLDER": + return FilesRecoveryOptions_Overwrite.OVERWRITE_OLDER; + case 3: + case "OVERWRITE_NONE": + return FilesRecoveryOptions_Overwrite.OVERWRITE_NONE; + case -1: + case "UNRECOGNIZED": + default: + return FilesRecoveryOptions_Overwrite.UNRECOGNIZED; + } +} + +export function filesRecoveryOptions_OverwriteToJSON( + object: FilesRecoveryOptions_Overwrite +): string { + switch (object) { + case FilesRecoveryOptions_Overwrite.OVERWRITE_UNSPECIFIED: + return "OVERWRITE_UNSPECIFIED"; + case FilesRecoveryOptions_Overwrite.OVERWRITE_ALL: + return "OVERWRITE_ALL"; + case FilesRecoveryOptions_Overwrite.OVERWRITE_OLDER: + return "OVERWRITE_OLDER"; + case FilesRecoveryOptions_Overwrite.OVERWRITE_NONE: + return "OVERWRITE_NONE"; + default: + return "UNKNOWN"; + } +} + +export interface StartFilesRecoveryRequest { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryRequest"; + /** Destination instance ID. */ + computeInstanceId: string; + /** Backup ID. 
*/ + backupId: string; + opts?: FilesRecoveryOptions; + sourceIds: string[]; +} + +export interface StartFilesRecoveryMetadata { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryMetadata"; + progressPercentage: number; + /** Destination instance ID. */ + computeInstanceId: string; + /** Backup ID. */ + backupId: string; + sourceIds: string[]; +} + +export interface DeleteBackupRequest { + $type: "yandex.cloud.backup.v1.DeleteBackupRequest"; + /** Compute Cloud instance ID of the Backup. */ + computeInstanceId: string; + /** Backup ID that should be deleted. */ + backupId: string; +} + +export interface DeleteBackupMetadata { + $type: "yandex.cloud.backup.v1.DeleteBackupMetadata"; + /** Compute Cloud instance ID of the Backup. */ + computeInstanceId: string; + /** Backup ID that should be deleted. */ + backupId: string; +} + +const baseListArchivesRequest: object = { + $type: "yandex.cloud.backup.v1.ListArchivesRequest", +}; + +export const ListArchivesRequest = { + $type: "yandex.cloud.backup.v1.ListArchivesRequest" as const, + + encode( + message: ListArchivesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== undefined) { + writer.uint32(10).string(message.folderId); + } + if (message.computeInstanceId !== undefined) { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListArchivesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListArchivesRequest } as ListArchivesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListArchivesRequest { + const message = { ...baseListArchivesRequest } as ListArchivesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : undefined; + return message; + }, + + toJSON(message: ListArchivesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListArchivesRequest { + const message = { ...baseListArchivesRequest } as ListArchivesRequest; + message.folderId = object.folderId ?? undefined; + message.computeInstanceId = object.computeInstanceId ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(ListArchivesRequest.$type, ListArchivesRequest); + +const baseListArchivesResponse: object = { + $type: "yandex.cloud.backup.v1.ListArchivesResponse", +}; + +export const ListArchivesResponse = { + $type: "yandex.cloud.backup.v1.ListArchivesResponse" as const, + + encode( + message: ListArchivesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.archives) { + Archive.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListArchivesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListArchivesResponse } as ListArchivesResponse; + message.archives = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.archives.push(Archive.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListArchivesResponse { + const message = { ...baseListArchivesResponse } as ListArchivesResponse; + message.archives = (object.archives ?? []).map((e: any) => + Archive.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListArchivesResponse): unknown { + const obj: any = {}; + if (message.archives) { + obj.archives = message.archives.map((e) => + e ? 
Archive.toJSON(e) : undefined + ); + } else { + obj.archives = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListArchivesResponse { + const message = { ...baseListArchivesResponse } as ListArchivesResponse; + message.archives = + object.archives?.map((e) => Archive.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListArchivesResponse.$type, ListArchivesResponse); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest", + orderBy: "", + filter: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== undefined) { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.archive !== undefined) { + ListBackupsRequest_ArchiveParameters.encode( + message.archive, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.instancePolicy !== undefined) { + ListBackupsRequest_InstancePolicy.encode( + message.instancePolicy, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.resourceId !== undefined) { + writer.uint32(50).string(message.resourceId); + } + if (message.policyId !== undefined) { + writer.uint32(58).string(message.policyId); + } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } + if (message.filter !== "") { + writer.uint32(66).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.archive = ListBackupsRequest_ArchiveParameters.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.instancePolicy = ListBackupsRequest_InstancePolicy.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.resourceId = reader.string(); + break; + case 7: + message.policyId = reader.string(); + break; + case 5: + message.orderBy = reader.string(); + break; + case 8: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : undefined; + message.archive = + object.archive !== undefined && object.archive !== null + ? ListBackupsRequest_ArchiveParameters.fromJSON(object.archive) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.instancePolicy = + object.instancePolicy !== undefined && object.instancePolicy !== null + ? ListBackupsRequest_InstancePolicy.fromJSON(object.instancePolicy) + : undefined; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : undefined; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : undefined; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? 
String(object.orderBy) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.archive !== undefined && + (obj.archive = message.archive + ? ListBackupsRequest_ArchiveParameters.toJSON(message.archive) + : undefined); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.instancePolicy !== undefined && + (obj.instancePolicy = message.instancePolicy + ? ListBackupsRequest_InstancePolicy.toJSON(message.instancePolicy) + : undefined); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.policyId !== undefined && (obj.policyId = message.policyId); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.computeInstanceId = object.computeInstanceId ?? undefined; + message.archive = + object.archive !== undefined && object.archive !== null + ? ListBackupsRequest_ArchiveParameters.fromPartial(object.archive) + : undefined; + message.folderId = object.folderId ?? undefined; + message.instancePolicy = + object.instancePolicy !== undefined && object.instancePolicy !== null + ? ListBackupsRequest_InstancePolicy.fromPartial(object.instancePolicy) + : undefined; + message.resourceId = object.resourceId ?? undefined; + message.policyId = object.policyId ?? undefined; + message.orderBy = object.orderBy ?? ""; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsRequest_ArchiveParameters: object = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.ArchiveParameters", + archiveId: "", + folderId: "", +}; + +export const ListBackupsRequest_ArchiveParameters = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.ArchiveParameters" as const, + + encode( + message: ListBackupsRequest_ArchiveParameters, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.archiveId !== "") { + writer.uint32(10).string(message.archiveId); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBackupsRequest_ArchiveParameters { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListBackupsRequest_ArchiveParameters, + } as ListBackupsRequest_ArchiveParameters; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.archiveId = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest_ArchiveParameters { + const message = { + ...baseListBackupsRequest_ArchiveParameters, + } as ListBackupsRequest_ArchiveParameters; + message.archiveId = + object.archiveId !== undefined && object.archiveId !== null + ? String(object.archiveId) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest_ArchiveParameters): unknown { + const obj: any = {}; + message.archiveId !== undefined && (obj.archiveId = message.archiveId); + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListBackupsRequest_ArchiveParameters { + const message = { + ...baseListBackupsRequest_ArchiveParameters, + } as ListBackupsRequest_ArchiveParameters; + message.archiveId = object.archiveId ?? ""; + message.folderId = object.folderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBackupsRequest_ArchiveParameters.$type, + ListBackupsRequest_ArchiveParameters +); + +const baseListBackupsRequest_InstancePolicy: object = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.InstancePolicy", + computeInstanceId: "", + policyId: "", +}; + +export const ListBackupsRequest_InstancePolicy = { + $type: "yandex.cloud.backup.v1.ListBackupsRequest.InstancePolicy" as const, + + encode( + message: ListBackupsRequest_InstancePolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.policyId !== "") { + writer.uint32(18).string(message.policyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBackupsRequest_InstancePolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListBackupsRequest_InstancePolicy, + } as ListBackupsRequest_InstancePolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest_InstancePolicy { + const message = { + ...baseListBackupsRequest_InstancePolicy, + } as ListBackupsRequest_InstancePolicy; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest_InstancePolicy): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListBackupsRequest_InstancePolicy { + const message = { + ...baseListBackupsRequest_InstancePolicy, + } as ListBackupsRequest_InstancePolicy; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.policyId = object.policyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBackupsRequest_InstancePolicy.$type, + ListBackupsRequest_InstancePolicy +); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.backup.v1.ListBackupsResponse", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.backup.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? 
Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +const baseListFilesRequest: object = { + $type: "yandex.cloud.backup.v1.ListFilesRequest", + folderId: "", + backupId: "", + sourceId: "", +}; + +export const ListFilesRequest = { + $type: "yandex.cloud.backup.v1.ListFilesRequest" as const, + + encode( + message: ListFilesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + if (message.sourceId !== "") { + writer.uint32(26).string(message.sourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListFilesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListFilesRequest } as ListFilesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + case 3: + message.sourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListFilesRequest { + const message = { ...baseListFilesRequest } as ListFilesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? 
String(object.backupId) + : ""; + message.sourceId = + object.sourceId !== undefined && object.sourceId !== null + ? String(object.sourceId) + : ""; + return message; + }, + + toJSON(message: ListFilesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.backupId !== undefined && (obj.backupId = message.backupId); + message.sourceId !== undefined && (obj.sourceId = message.sourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListFilesRequest { + const message = { ...baseListFilesRequest } as ListFilesRequest; + message.folderId = object.folderId ?? ""; + message.backupId = object.backupId ?? ""; + message.sourceId = object.sourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListFilesRequest.$type, ListFilesRequest); + +const baseListFilesResponse: object = { + $type: "yandex.cloud.backup.v1.ListFilesResponse", +}; + +export const ListFilesResponse = { + $type: "yandex.cloud.backup.v1.ListFilesResponse" as const, + + encode( + message: ListFilesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.files) { + BackupFile.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListFilesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListFilesResponse } as ListFilesResponse; + message.files = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.files.push(BackupFile.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListFilesResponse { + const message = { ...baseListFilesResponse } as ListFilesResponse; + message.files = (object.files ?? 
[]).map((e: any) => + BackupFile.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListFilesResponse): unknown { + const obj: any = {}; + if (message.files) { + obj.files = message.files.map((e) => + e ? BackupFile.toJSON(e) : undefined + ); + } else { + obj.files = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListFilesResponse { + const message = { ...baseListFilesResponse } as ListFilesResponse; + message.files = object.files?.map((e) => BackupFile.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListFilesResponse.$type, ListFilesResponse); + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.backup.v1.GetBackupRequest", + backupId: "", + folderId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.backup.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? 
String(object.backupId) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? ""; + message.folderId = object.folderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseStartRecoveryRequest: object = { + $type: "yandex.cloud.backup.v1.StartRecoveryRequest", + computeInstanceId: "", + backupId: "", +}; + +export const StartRecoveryRequest = { + $type: "yandex.cloud.backup.v1.StartRecoveryRequest" as const, + + encode( + message: StartRecoveryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartRecoveryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseStartRecoveryRequest } as StartRecoveryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartRecoveryRequest { + const message = { ...baseStartRecoveryRequest } as StartRecoveryRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: StartRecoveryRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartRecoveryRequest { + const message = { ...baseStartRecoveryRequest } as StartRecoveryRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StartRecoveryRequest.$type, StartRecoveryRequest); + +const baseStartRecoveryMetadata: object = { + $type: "yandex.cloud.backup.v1.StartRecoveryMetadata", + progressPercentage: 0, + srcBackupId: "", + dstComputeInstanceId: "", +}; + +export const StartRecoveryMetadata = { + $type: "yandex.cloud.backup.v1.StartRecoveryMetadata" as const, + + encode( + message: StartRecoveryMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.progressPercentage !== 0) { + writer.uint32(9).double(message.progressPercentage); + } + if (message.srcBackupId !== "") { + writer.uint32(18).string(message.srcBackupId); + } + if (message.dstComputeInstanceId !== "") { + writer.uint32(26).string(message.dstComputeInstanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartRecoveryMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStartRecoveryMetadata } as StartRecoveryMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.progressPercentage = reader.double(); + break; + case 2: + message.srcBackupId = reader.string(); + break; + case 3: + message.dstComputeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartRecoveryMetadata { + const message = { ...baseStartRecoveryMetadata } as StartRecoveryMetadata; + message.progressPercentage = + object.progressPercentage !== undefined && + object.progressPercentage !== null + ? Number(object.progressPercentage) + : 0; + message.srcBackupId = + object.srcBackupId !== undefined && object.srcBackupId !== null + ? 
String(object.srcBackupId) + : ""; + message.dstComputeInstanceId = + object.dstComputeInstanceId !== undefined && + object.dstComputeInstanceId !== null + ? String(object.dstComputeInstanceId) + : ""; + return message; + }, + + toJSON(message: StartRecoveryMetadata): unknown { + const obj: any = {}; + message.progressPercentage !== undefined && + (obj.progressPercentage = message.progressPercentage); + message.srcBackupId !== undefined && + (obj.srcBackupId = message.srcBackupId); + message.dstComputeInstanceId !== undefined && + (obj.dstComputeInstanceId = message.dstComputeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartRecoveryMetadata { + const message = { ...baseStartRecoveryMetadata } as StartRecoveryMetadata; + message.progressPercentage = object.progressPercentage ?? 0; + message.srcBackupId = object.srcBackupId ?? ""; + message.dstComputeInstanceId = object.dstComputeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(StartRecoveryMetadata.$type, StartRecoveryMetadata); + +const baseTargetPathOriginal: object = { + $type: "yandex.cloud.backup.v1.TargetPathOriginal", +}; + +export const TargetPathOriginal = { + $type: "yandex.cloud.backup.v1.TargetPathOriginal" as const, + + encode( + _: TargetPathOriginal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TargetPathOriginal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseTargetPathOriginal } as TargetPathOriginal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): TargetPathOriginal { + const message = { ...baseTargetPathOriginal } as TargetPathOriginal; + return message; + }, + + toJSON(_: TargetPathOriginal): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): TargetPathOriginal { + const message = { ...baseTargetPathOriginal } as TargetPathOriginal; + return message; + }, +}; + +messageTypeRegistry.set(TargetPathOriginal.$type, TargetPathOriginal); + +const baseTargetPathCustom: object = { + $type: "yandex.cloud.backup.v1.TargetPathCustom", + path: "", +}; + +export const TargetPathCustom = { + $type: "yandex.cloud.backup.v1.TargetPathCustom" as const, + + encode( + message: TargetPathCustom, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.path !== "") { + writer.uint32(10).string(message.path); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TargetPathCustom { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTargetPathCustom } as TargetPathCustom; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TargetPathCustom { + const message = { ...baseTargetPathCustom } as TargetPathCustom; + message.path = + object.path !== undefined && object.path !== null + ? 
String(object.path) + : ""; + return message; + }, + + toJSON(message: TargetPathCustom): unknown { + const obj: any = {}; + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial, I>>( + object: I + ): TargetPathCustom { + const message = { ...baseTargetPathCustom } as TargetPathCustom; + message.path = object.path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(TargetPathCustom.$type, TargetPathCustom); + +const baseFilesRecoveryOptions: object = { + $type: "yandex.cloud.backup.v1.FilesRecoveryOptions", + overwrite: 0, + rebootIfNeeded: false, +}; + +export const FilesRecoveryOptions = { + $type: "yandex.cloud.backup.v1.FilesRecoveryOptions" as const, + + encode( + message: FilesRecoveryOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.overwrite !== 0) { + writer.uint32(8).int32(message.overwrite); + } + if (message.rebootIfNeeded === true) { + writer.uint32(16).bool(message.rebootIfNeeded); + } + if (message.original !== undefined) { + TargetPathOriginal.encode( + message.original, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.custom !== undefined) { + TargetPathCustom.encode( + message.custom, + writer.uint32(810).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): FilesRecoveryOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseFilesRecoveryOptions } as FilesRecoveryOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.overwrite = reader.int32() as any; + break; + case 2: + message.rebootIfNeeded = reader.bool(); + break; + case 100: + message.original = TargetPathOriginal.decode(reader, reader.uint32()); + break; + case 101: + message.custom = TargetPathCustom.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FilesRecoveryOptions { + const message = { ...baseFilesRecoveryOptions } as FilesRecoveryOptions; + message.overwrite = + object.overwrite !== undefined && object.overwrite !== null + ? filesRecoveryOptions_OverwriteFromJSON(object.overwrite) + : 0; + message.rebootIfNeeded = + object.rebootIfNeeded !== undefined && object.rebootIfNeeded !== null + ? Boolean(object.rebootIfNeeded) + : false; + message.original = + object.original !== undefined && object.original !== null + ? TargetPathOriginal.fromJSON(object.original) + : undefined; + message.custom = + object.custom !== undefined && object.custom !== null + ? TargetPathCustom.fromJSON(object.custom) + : undefined; + return message; + }, + + toJSON(message: FilesRecoveryOptions): unknown { + const obj: any = {}; + message.overwrite !== undefined && + (obj.overwrite = filesRecoveryOptions_OverwriteToJSON(message.overwrite)); + message.rebootIfNeeded !== undefined && + (obj.rebootIfNeeded = message.rebootIfNeeded); + message.original !== undefined && + (obj.original = message.original + ? TargetPathOriginal.toJSON(message.original) + : undefined); + message.custom !== undefined && + (obj.custom = message.custom + ? 
TargetPathCustom.toJSON(message.custom) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): FilesRecoveryOptions { + const message = { ...baseFilesRecoveryOptions } as FilesRecoveryOptions; + message.overwrite = object.overwrite ?? 0; + message.rebootIfNeeded = object.rebootIfNeeded ?? false; + message.original = + object.original !== undefined && object.original !== null + ? TargetPathOriginal.fromPartial(object.original) + : undefined; + message.custom = + object.custom !== undefined && object.custom !== null + ? TargetPathCustom.fromPartial(object.custom) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(FilesRecoveryOptions.$type, FilesRecoveryOptions); + +const baseStartFilesRecoveryRequest: object = { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryRequest", + computeInstanceId: "", + backupId: "", + sourceIds: "", +}; + +export const StartFilesRecoveryRequest = { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryRequest" as const, + + encode( + message: StartFilesRecoveryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + if (message.opts !== undefined) { + FilesRecoveryOptions.encode( + message.opts, + writer.uint32(26).fork() + ).ldelim(); + } + for (const v of message.sourceIds) { + writer.uint32(34).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartFilesRecoveryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseStartFilesRecoveryRequest, + } as StartFilesRecoveryRequest; + message.sourceIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + case 3: + message.opts = FilesRecoveryOptions.decode(reader, reader.uint32()); + break; + case 4: + message.sourceIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartFilesRecoveryRequest { + const message = { + ...baseStartFilesRecoveryRequest, + } as StartFilesRecoveryRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.opts = + object.opts !== undefined && object.opts !== null + ? FilesRecoveryOptions.fromJSON(object.opts) + : undefined; + message.sourceIds = (object.sourceIds ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: StartFilesRecoveryRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.backupId !== undefined && (obj.backupId = message.backupId); + message.opts !== undefined && + (obj.opts = message.opts + ? FilesRecoveryOptions.toJSON(message.opts) + : undefined); + if (message.sourceIds) { + obj.sourceIds = message.sourceIds.map((e) => e); + } else { + obj.sourceIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): StartFilesRecoveryRequest { + const message = { + ...baseStartFilesRecoveryRequest, + } as StartFilesRecoveryRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.backupId = object.backupId ?? 
""; + message.opts = + object.opts !== undefined && object.opts !== null + ? FilesRecoveryOptions.fromPartial(object.opts) + : undefined; + message.sourceIds = object.sourceIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + StartFilesRecoveryRequest.$type, + StartFilesRecoveryRequest +); + +const baseStartFilesRecoveryMetadata: object = { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryMetadata", + progressPercentage: 0, + computeInstanceId: "", + backupId: "", + sourceIds: "", +}; + +export const StartFilesRecoveryMetadata = { + $type: "yandex.cloud.backup.v1.StartFilesRecoveryMetadata" as const, + + encode( + message: StartFilesRecoveryMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.progressPercentage !== 0) { + writer.uint32(9).double(message.progressPercentage); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + if (message.backupId !== "") { + writer.uint32(26).string(message.backupId); + } + for (const v of message.sourceIds) { + writer.uint32(34).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartFilesRecoveryMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseStartFilesRecoveryMetadata, + } as StartFilesRecoveryMetadata; + message.sourceIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.progressPercentage = reader.double(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + case 3: + message.backupId = reader.string(); + break; + case 4: + message.sourceIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartFilesRecoveryMetadata { + const message = { + ...baseStartFilesRecoveryMetadata, + } as StartFilesRecoveryMetadata; + message.progressPercentage = + object.progressPercentage !== undefined && + object.progressPercentage !== null + ? Number(object.progressPercentage) + : 0; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.sourceIds = (object.sourceIds ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: StartFilesRecoveryMetadata): unknown { + const obj: any = {}; + message.progressPercentage !== undefined && + (obj.progressPercentage = message.progressPercentage); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.backupId !== undefined && (obj.backupId = message.backupId); + if (message.sourceIds) { + obj.sourceIds = message.sourceIds.map((e) => e); + } else { + obj.sourceIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): StartFilesRecoveryMetadata { + const message = { + ...baseStartFilesRecoveryMetadata, + } as StartFilesRecoveryMetadata; + message.progressPercentage = object.progressPercentage ?? 
0; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.backupId = object.backupId ?? ""; + message.sourceIds = object.sourceIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + StartFilesRecoveryMetadata.$type, + StartFilesRecoveryMetadata +); + +const baseDeleteBackupRequest: object = { + $type: "yandex.cloud.backup.v1.DeleteBackupRequest", + computeInstanceId: "", + backupId: "", +}; + +export const DeleteBackupRequest = { + $type: "yandex.cloud.backup.v1.DeleteBackupRequest" as const, + + encode( + message: DeleteBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? 
String(object.backupId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupRequest.$type, DeleteBackupRequest); + +const baseDeleteBackupMetadata: object = { + $type: "yandex.cloud.backup.v1.DeleteBackupMetadata", + computeInstanceId: "", + backupId: "", +}; + +export const DeleteBackupMetadata = { + $type: "yandex.cloud.backup.v1.DeleteBackupMetadata" as const, + + encode( + message: DeleteBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupMetadata): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupMetadata.$type, DeleteBackupMetadata); + +/** A set of methods for managing [backups](/docs/backup/concepts/backup). */ +export const BackupServiceService = { + /** List backups using filters. 
*/ + list: { + path: "/yandex.cloud.backup.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, + /** + * List archives that holds backups for specified folder or + * specified [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). + */ + listArchives: { + path: "/yandex.cloud.backup.v1.BackupService/ListArchives", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListArchivesRequest) => + Buffer.from(ListArchivesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListArchivesRequest.decode(value), + responseSerialize: (value: ListArchivesResponse) => + Buffer.from(ListArchivesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListArchivesResponse.decode(value), + }, + /** ListFiles of the backup. */ + listFiles: { + path: "/yandex.cloud.backup.v1.BackupService/ListFiles", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListFilesRequest) => + Buffer.from(ListFilesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListFilesRequest.decode(value), + responseSerialize: (value: ListFilesResponse) => + Buffer.from(ListFilesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListFilesResponse.decode(value), + }, + /** Get backup by its id. 
*/ + get: { + path: "/yandex.cloud.backup.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** + * Start recovery process of specified backup to specific Compute Cloud instance. + * + * For details, see [Restoring a VM from a backup](/docs/backup/operations/backup-vm/recover). + */ + startRecovery: { + path: "/yandex.cloud.backup.v1.BackupService/StartRecovery", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartRecoveryRequest) => + Buffer.from(StartRecoveryRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StartRecoveryRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** StartFilesRecovery runs recovery process of selected files to specific Compute Cloud instance. */ + startFilesRecovery: { + path: "/yandex.cloud.backup.v1.BackupService/StartFilesRecovery", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartFilesRecoveryRequest) => + Buffer.from(StartFilesRecoveryRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + StartFilesRecoveryRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Delete specific backup. 
*/ + delete: { + path: "/yandex.cloud.backup.v1.BackupService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBackupRequest) => + Buffer.from(DeleteBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteBackupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** List backups using filters. */ + list: handleUnaryCall; + /** + * List archives that holds backups for specified folder or + * specified [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). + */ + listArchives: handleUnaryCall; + /** ListFiles of the backup. */ + listFiles: handleUnaryCall; + /** Get backup by its id. */ + get: handleUnaryCall; + /** + * Start recovery process of specified backup to specific Compute Cloud instance. + * + * For details, see [Restoring a VM from a backup](/docs/backup/operations/backup-vm/recover). + */ + startRecovery: handleUnaryCall; + /** StartFilesRecovery runs recovery process of selected files to specific Compute Cloud instance. */ + startFilesRecovery: handleUnaryCall; + /** Delete specific backup. */ + delete: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** List backups using filters. 
*/ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + /** + * List archives that holds backups for specified folder or + * specified [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). + */ + listArchives( + request: ListArchivesRequest, + callback: ( + error: ServiceError | null, + response: ListArchivesResponse + ) => void + ): ClientUnaryCall; + listArchives( + request: ListArchivesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListArchivesResponse + ) => void + ): ClientUnaryCall; + listArchives( + request: ListArchivesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListArchivesResponse + ) => void + ): ClientUnaryCall; + /** ListFiles of the backup. */ + listFiles( + request: ListFilesRequest, + callback: (error: ServiceError | null, response: ListFilesResponse) => void + ): ClientUnaryCall; + listFiles( + request: ListFilesRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ListFilesResponse) => void + ): ClientUnaryCall; + listFiles( + request: ListFilesRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ListFilesResponse) => void + ): ClientUnaryCall; + /** Get backup by its id. 
*/ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** + * Start recovery process of specified backup to specific Compute Cloud instance. + * + * For details, see [Restoring a VM from a backup](/docs/backup/operations/backup-vm/recover). + */ + startRecovery( + request: StartRecoveryRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startRecovery( + request: StartRecoveryRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startRecovery( + request: StartRecoveryRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** StartFilesRecovery runs recovery process of selected files to specific Compute Cloud instance. */ + startFilesRecovery( + request: StartFilesRecoveryRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startFilesRecovery( + request: StartFilesRecoveryRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startFilesRecovery( + request: StartFilesRecoveryRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Delete specific backup. 
*/ + delete( + request: DeleteBackupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.backup.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/policy.ts b/src/generated/yandex/cloud/backup/v1/policy.ts new file mode 100644 index 00000000..4ad96929 --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/policy.ts @@ -0,0 +1,2913 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +/** + * Format of the backup in policy. 
For backup locations that can be browsed + * with a file manager, the backup format determines the number of files and + * their extension. + */ +export enum Format { + FORMAT_UNSPECIFIED = 0, + /** VERSION_11 - A legacy backup format used in older versions. It's not recommended to use. */ + VERSION_11 = 1, + /** VERSION_12 - A new format recommended in most cases for fast backup and recovery. */ + VERSION_12 = 2, + /** + * AUTO - Automatic version selection. Will be used version 12 unless the protection + * plan (policy) appends backups to the ones created by earlier product + * versions. + */ + AUTO = 3, + UNRECOGNIZED = -1, +} + +export function formatFromJSON(object: any): Format { + switch (object) { + case 0: + case "FORMAT_UNSPECIFIED": + return Format.FORMAT_UNSPECIFIED; + case 1: + case "VERSION_11": + return Format.VERSION_11; + case 2: + case "VERSION_12": + return Format.VERSION_12; + case 3: + case "AUTO": + return Format.AUTO; + case -1: + case "UNRECOGNIZED": + default: + return Format.UNRECOGNIZED; + } +} + +export function formatToJSON(object: Format): string { + switch (object) { + case Format.FORMAT_UNSPECIFIED: + return "FORMAT_UNSPECIFIED"; + case Format.VERSION_11: + return "VERSION_11"; + case Format.VERSION_12: + return "VERSION_12"; + case Format.AUTO: + return "AUTO"; + default: + return "UNKNOWN"; + } +} + +export interface Policy { + $type: "yandex.cloud.backup.v1.Policy"; + /** Policy ID. */ + id: string; + /** Policy name. */ + name: string; + createdAt?: Date; + updatedAt?: Date; + /** If this field is true, it means that the policy is enabled. */ + enabled: boolean; + /** Set of policy settings */ + settings?: PolicySettings; + /** ID of the folder that the policy belongs to. */ + folderId: string; +} + +/** Set of policy settings */ +export interface PolicySettings { + $type: "yandex.cloud.backup.v1.PolicySettings"; + /** Archive compression level. 
*/ + compression: PolicySettings_Compression; + /** Format of the Acronis backup archive. */ + format: Format; + /** If true, snapshots of multiple volumes will be taken simultaneously. */ + multiVolumeSnapshottingEnabled: boolean; + /** If true, the file security settings will be preserved. */ + preserveFileSecuritySettings: boolean; + /** Configuration of retries on recoverable errors during the backup operations like reconnection to destination. No attempts to fix recoverable errors will be made if retry configuration is not set. */ + reattempts?: PolicySettings_RetriesConfiguration; + /** If true, a user interaction will be avoided when possible. Equals to false if value is not specified. */ + silentModeEnabled: boolean; + /** Determines the size to split backups on. Splitting is not performed if value is not specified. */ + splitting?: PolicySettings_Splitting; + /** Configuration of retries on errors during the creation of the virtual machine snapshot. No attempts to fix recoverable errors will be made if retry configuration is not set. */ + vmSnapshotReattempts?: PolicySettings_RetriesConfiguration; + /** Settings for the Volume Shadow Copy Service (VSS) provider. If not set, no VSS provider is used. */ + vss?: PolicySettings_VolumeShadowCopyServiceSettings; + /** The archive properties. */ + archive?: PolicySettings_ArchiveProperties; + /** Time windows for performance limitations of backup and storage maintenance operations. */ + performanceWindow?: PolicySettings_PerformanceWindow; + /** Configuration of backup retention rules. */ + retention?: PolicySettings_Retention; + /** Configuration of the backup schedule. */ + scheduling?: PolicySettings_Scheduling; + /** A configuration of Changed Block Tracking (CBT). */ + cbt: PolicySettings_ChangedBlockTracking; + /** If true, determines whether a file has changed by the file size and timestamp. Otherwise, the entire file contents are compared to those stored in the backup. 
*/ + fastBackupEnabled: boolean; + /** If true, a quiesced snapshot of the virtual machine will be taken. */ + quiesceSnapshottingEnabled: boolean; +} + +/** Compression rate of the backups. */ +export enum PolicySettings_Compression { + COMPRESSION_UNSPECIFIED = 0, + NORMAL = 1, + HIGH = 2, + MAX = 3, + OFF = 4, + UNRECOGNIZED = -1, +} + +export function policySettings_CompressionFromJSON( + object: any +): PolicySettings_Compression { + switch (object) { + case 0: + case "COMPRESSION_UNSPECIFIED": + return PolicySettings_Compression.COMPRESSION_UNSPECIFIED; + case 1: + case "NORMAL": + return PolicySettings_Compression.NORMAL; + case 2: + case "HIGH": + return PolicySettings_Compression.HIGH; + case 3: + case "MAX": + return PolicySettings_Compression.MAX; + case 4: + case "OFF": + return PolicySettings_Compression.OFF; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_Compression.UNRECOGNIZED; + } +} + +export function policySettings_CompressionToJSON( + object: PolicySettings_Compression +): string { + switch (object) { + case PolicySettings_Compression.COMPRESSION_UNSPECIFIED: + return "COMPRESSION_UNSPECIFIED"; + case PolicySettings_Compression.NORMAL: + return "NORMAL"; + case PolicySettings_Compression.HIGH: + return "HIGH"; + case PolicySettings_Compression.MAX: + return "MAX"; + case PolicySettings_Compression.OFF: + return "OFF"; + default: + return "UNKNOWN"; + } +} + +export enum PolicySettings_RepeatePeriod { + REPEATE_PERIOD_UNSPECIFIED = 0, + HOURLY = 1, + DAILY = 2, + WEEKLY = 3, + MONTHLY = 4, + UNRECOGNIZED = -1, +} + +export function policySettings_RepeatePeriodFromJSON( + object: any +): PolicySettings_RepeatePeriod { + switch (object) { + case 0: + case "REPEATE_PERIOD_UNSPECIFIED": + return PolicySettings_RepeatePeriod.REPEATE_PERIOD_UNSPECIFIED; + case 1: + case "HOURLY": + return PolicySettings_RepeatePeriod.HOURLY; + case 2: + case "DAILY": + return PolicySettings_RepeatePeriod.DAILY; + case 3: + case "WEEKLY": + return 
PolicySettings_RepeatePeriod.WEEKLY; + case 4: + case "MONTHLY": + return PolicySettings_RepeatePeriod.MONTHLY; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_RepeatePeriod.UNRECOGNIZED; + } +} + +export function policySettings_RepeatePeriodToJSON( + object: PolicySettings_RepeatePeriod +): string { + switch (object) { + case PolicySettings_RepeatePeriod.REPEATE_PERIOD_UNSPECIFIED: + return "REPEATE_PERIOD_UNSPECIFIED"; + case PolicySettings_RepeatePeriod.HOURLY: + return "HOURLY"; + case PolicySettings_RepeatePeriod.DAILY: + return "DAILY"; + case PolicySettings_RepeatePeriod.WEEKLY: + return "WEEKLY"; + case PolicySettings_RepeatePeriod.MONTHLY: + return "MONTHLY"; + default: + return "UNKNOWN"; + } +} + +export enum PolicySettings_Day { + DAY_UNSPECIFIED = 0, + MONDAY = 1, + TUESDAY = 2, + WEDNESDAY = 3, + THURSDAY = 4, + FRIDAY = 5, + SATURDAY = 6, + SUNDAY = 7, + UNRECOGNIZED = -1, +} + +export function policySettings_DayFromJSON(object: any): PolicySettings_Day { + switch (object) { + case 0: + case "DAY_UNSPECIFIED": + return PolicySettings_Day.DAY_UNSPECIFIED; + case 1: + case "MONDAY": + return PolicySettings_Day.MONDAY; + case 2: + case "TUESDAY": + return PolicySettings_Day.TUESDAY; + case 3: + case "WEDNESDAY": + return PolicySettings_Day.WEDNESDAY; + case 4: + case "THURSDAY": + return PolicySettings_Day.THURSDAY; + case 5: + case "FRIDAY": + return PolicySettings_Day.FRIDAY; + case 6: + case "SATURDAY": + return PolicySettings_Day.SATURDAY; + case 7: + case "SUNDAY": + return PolicySettings_Day.SUNDAY; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_Day.UNRECOGNIZED; + } +} + +export function policySettings_DayToJSON(object: PolicySettings_Day): string { + switch (object) { + case PolicySettings_Day.DAY_UNSPECIFIED: + return "DAY_UNSPECIFIED"; + case PolicySettings_Day.MONDAY: + return "MONDAY"; + case PolicySettings_Day.TUESDAY: + return "TUESDAY"; + case PolicySettings_Day.WEDNESDAY: + return "WEDNESDAY"; + 
case PolicySettings_Day.THURSDAY: + return "THURSDAY"; + case PolicySettings_Day.FRIDAY: + return "FRIDAY"; + case PolicySettings_Day.SATURDAY: + return "SATURDAY"; + case PolicySettings_Day.SUNDAY: + return "SUNDAY"; + default: + return "UNKNOWN"; + } +} + +export enum PolicySettings_ChangedBlockTracking { + CHANGED_BLOCK_TRACKING_UNSPECIFIED = 0, + USE_IF_ENABLED = 1, + ENABLE_AND_USE = 2, + DO_NOT_USE = 3, + UNRECOGNIZED = -1, +} + +export function policySettings_ChangedBlockTrackingFromJSON( + object: any +): PolicySettings_ChangedBlockTracking { + switch (object) { + case 0: + case "CHANGED_BLOCK_TRACKING_UNSPECIFIED": + return PolicySettings_ChangedBlockTracking.CHANGED_BLOCK_TRACKING_UNSPECIFIED; + case 1: + case "USE_IF_ENABLED": + return PolicySettings_ChangedBlockTracking.USE_IF_ENABLED; + case 2: + case "ENABLE_AND_USE": + return PolicySettings_ChangedBlockTracking.ENABLE_AND_USE; + case 3: + case "DO_NOT_USE": + return PolicySettings_ChangedBlockTracking.DO_NOT_USE; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_ChangedBlockTracking.UNRECOGNIZED; + } +} + +export function policySettings_ChangedBlockTrackingToJSON( + object: PolicySettings_ChangedBlockTracking +): string { + switch (object) { + case PolicySettings_ChangedBlockTracking.CHANGED_BLOCK_TRACKING_UNSPECIFIED: + return "CHANGED_BLOCK_TRACKING_UNSPECIFIED"; + case PolicySettings_ChangedBlockTracking.USE_IF_ENABLED: + return "USE_IF_ENABLED"; + case PolicySettings_ChangedBlockTracking.ENABLE_AND_USE: + return "ENABLE_AND_USE"; + case PolicySettings_ChangedBlockTracking.DO_NOT_USE: + return "DO_NOT_USE"; + default: + return "UNKNOWN"; + } +} + +export interface PolicySettings_Interval { + $type: "yandex.cloud.backup.v1.PolicySettings.Interval"; + /** A type of the interval. */ + type: PolicySettings_Interval_Type; + /** The amount of value specified in `Interval.Type`. 
*/ + count: number; +} + +export enum PolicySettings_Interval_Type { + TYPE_UNSPECIFIED = 0, + SECONDS = 1, + MINUTES = 2, + HOURS = 3, + DAYS = 4, + WEEKS = 5, + MONTHS = 6, + UNRECOGNIZED = -1, +} + +export function policySettings_Interval_TypeFromJSON( + object: any +): PolicySettings_Interval_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return PolicySettings_Interval_Type.TYPE_UNSPECIFIED; + case 1: + case "SECONDS": + return PolicySettings_Interval_Type.SECONDS; + case 2: + case "MINUTES": + return PolicySettings_Interval_Type.MINUTES; + case 3: + case "HOURS": + return PolicySettings_Interval_Type.HOURS; + case 4: + case "DAYS": + return PolicySettings_Interval_Type.DAYS; + case 5: + case "WEEKS": + return PolicySettings_Interval_Type.WEEKS; + case 6: + case "MONTHS": + return PolicySettings_Interval_Type.MONTHS; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_Interval_Type.UNRECOGNIZED; + } +} + +export function policySettings_Interval_TypeToJSON( + object: PolicySettings_Interval_Type +): string { + switch (object) { + case PolicySettings_Interval_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case PolicySettings_Interval_Type.SECONDS: + return "SECONDS"; + case PolicySettings_Interval_Type.MINUTES: + return "MINUTES"; + case PolicySettings_Interval_Type.HOURS: + return "HOURS"; + case PolicySettings_Interval_Type.DAYS: + return "DAYS"; + case PolicySettings_Interval_Type.WEEKS: + return "WEEKS"; + case PolicySettings_Interval_Type.MONTHS: + return "MONTHS"; + default: + return "UNKNOWN"; + } +} + +export interface PolicySettings_RetriesConfiguration { + $type: "yandex.cloud.backup.v1.PolicySettings.RetriesConfiguration"; + /** If true, enables retry on errors. */ + enabled: boolean; + /** An interval between retry attempts. */ + interval?: PolicySettings_Interval; + /** + * Max number of retry attempts. Operation will be considered as failed + * when max number of retry attempts is reached. 
+ */ + maxAttempts: number; +} + +export interface PolicySettings_Splitting { + $type: "yandex.cloud.backup.v1.PolicySettings.Splitting"; + /** The size of split backup file in bytes. */ + size: number; +} + +/** + * Settings for Volume Shadow Copy Services which allows to notify + * VSS-aware applications that backup is about to start. This will + * ensure the consistent state of all data used by the applications. + */ +export interface PolicySettings_VolumeShadowCopyServiceSettings { + $type: "yandex.cloud.backup.v1.PolicySettings.VolumeShadowCopyServiceSettings"; + /** If true, the VSS will be enabled. */ + enabled: boolean; + /** A type of VSS provider to use in backup. */ + provider: PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider; +} + +export enum PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider { + VSS_PROVIDER_UNSPECIFIED = 0, + NATIVE = 1, + TARGET_SYSTEM_DEFINED = 2, + UNRECOGNIZED = -1, +} + +export function policySettings_VolumeShadowCopyServiceSettings_VSSProviderFromJSON( + object: any +): PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider { + switch (object) { + case 0: + case "VSS_PROVIDER_UNSPECIFIED": + return PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.VSS_PROVIDER_UNSPECIFIED; + case 1: + case "NATIVE": + return PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.NATIVE; + case 2: + case "TARGET_SYSTEM_DEFINED": + return PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.TARGET_SYSTEM_DEFINED; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.UNRECOGNIZED; + } +} + +export function policySettings_VolumeShadowCopyServiceSettings_VSSProviderToJSON( + object: PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider +): string { + switch (object) { + case PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.VSS_PROVIDER_UNSPECIFIED: + return "VSS_PROVIDER_UNSPECIFIED"; + case 
PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.NATIVE: + return "NATIVE"; + case PolicySettings_VolumeShadowCopyServiceSettings_VSSProvider.TARGET_SYSTEM_DEFINED: + return "TARGET_SYSTEM_DEFINED"; + default: + return "UNKNOWN"; + } +} + +export interface PolicySettings_ArchiveProperties { + $type: "yandex.cloud.backup.v1.PolicySettings.ArchiveProperties"; + /** + * The name of the generated archive. The name may use the following variables: `[Machine Name]`, `[Plan ID]`, `[Plan Name]`, `[Unique ID]`, `[Virtualization Server Type]`. + * Default value: `[Machine Name]-[Plan ID]-[Unique ID]A`. + */ + name: string; +} + +export interface PolicySettings_PerformanceWindow { + $type: "yandex.cloud.backup.v1.PolicySettings.PerformanceWindow"; + /** If true, the time windows will be enabled. */ + enabled: boolean; +} + +export interface PolicySettings_TimeOfDay { + $type: "yandex.cloud.backup.v1.PolicySettings.TimeOfDay"; + /** Hours. */ + hour: number; + /** Minutes. */ + minute: number; +} + +export interface PolicySettings_Retention { + $type: "yandex.cloud.backup.v1.PolicySettings.Retention"; + /** A list of retention rules. */ + rules: PolicySettings_Retention_RetentionRule[]; + /** If true, retention rules will be applied after backup is finished. */ + afterBackup: boolean; +} + +export interface PolicySettings_Retention_RetentionRule { + $type: "yandex.cloud.backup.v1.PolicySettings.Retention.RetentionRule"; + /** A list of backup sets where rules are effective. */ + backupSet: PolicySettings_RepeatePeriod[]; + maxAge?: PolicySettings_Interval | undefined; + maxCount: number | undefined; +} + +export interface PolicySettings_Scheduling { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling"; + /** A list of schedules with backup sets that compose the whole scheme. */ + backupSets: PolicySettings_Scheduling_BackupSet[]; + /** If true, the backup schedule will be enabled. 
*/ + enabled: boolean; + /** Max number of backup processes allowed to run in parallel. Unlimited if not set. */ + maxParallelBackups: number; + /** Configuration of the random delay between the execution of parallel tasks. */ + randMaxDelay?: PolicySettings_Interval; + /** A backup scheme. Available values: `simple`, `always_full`, `always_incremental`, `weekly_incremental`, `weekly_full_daily_incremental`, `custom`, `cdp`. */ + scheme: PolicySettings_Scheduling_Scheme; + /** A day of week to start weekly backups. */ + weeklyBackupDay: PolicySettings_Day; +} + +/** Scheme of backups. */ +export enum PolicySettings_Scheduling_Scheme { + SCHEME_UNSPECIFIED = 0, + SIMPLE = 1, + ALWAYS_FULL = 2, + ALWAYS_INCREMENTAL = 3, + WEEKLY_INCREMENTAL = 4, + WEEKLY_FULL_DAILY_INCREMENTAL = 5, + /** + * CUSTOM - Custom will require to specify schedules for full, differential + * and incremental backups additionally. + */ + CUSTOM = 6, + CDP = 7, + UNRECOGNIZED = -1, +} + +export function policySettings_Scheduling_SchemeFromJSON( + object: any +): PolicySettings_Scheduling_Scheme { + switch (object) { + case 0: + case "SCHEME_UNSPECIFIED": + return PolicySettings_Scheduling_Scheme.SCHEME_UNSPECIFIED; + case 1: + case "SIMPLE": + return PolicySettings_Scheduling_Scheme.SIMPLE; + case 2: + case "ALWAYS_FULL": + return PolicySettings_Scheduling_Scheme.ALWAYS_FULL; + case 3: + case "ALWAYS_INCREMENTAL": + return PolicySettings_Scheduling_Scheme.ALWAYS_INCREMENTAL; + case 4: + case "WEEKLY_INCREMENTAL": + return PolicySettings_Scheduling_Scheme.WEEKLY_INCREMENTAL; + case 5: + case "WEEKLY_FULL_DAILY_INCREMENTAL": + return PolicySettings_Scheduling_Scheme.WEEKLY_FULL_DAILY_INCREMENTAL; + case 6: + case "CUSTOM": + return PolicySettings_Scheduling_Scheme.CUSTOM; + case 7: + case "CDP": + return PolicySettings_Scheduling_Scheme.CDP; + case -1: + case "UNRECOGNIZED": + default: + return PolicySettings_Scheduling_Scheme.UNRECOGNIZED; + } +} + +export function 
policySettings_Scheduling_SchemeToJSON( + object: PolicySettings_Scheduling_Scheme +): string { + switch (object) { + case PolicySettings_Scheduling_Scheme.SCHEME_UNSPECIFIED: + return "SCHEME_UNSPECIFIED"; + case PolicySettings_Scheduling_Scheme.SIMPLE: + return "SIMPLE"; + case PolicySettings_Scheduling_Scheme.ALWAYS_FULL: + return "ALWAYS_FULL"; + case PolicySettings_Scheduling_Scheme.ALWAYS_INCREMENTAL: + return "ALWAYS_INCREMENTAL"; + case PolicySettings_Scheduling_Scheme.WEEKLY_INCREMENTAL: + return "WEEKLY_INCREMENTAL"; + case PolicySettings_Scheduling_Scheme.WEEKLY_FULL_DAILY_INCREMENTAL: + return "WEEKLY_FULL_DAILY_INCREMENTAL"; + case PolicySettings_Scheduling_Scheme.CUSTOM: + return "CUSTOM"; + case PolicySettings_Scheduling_Scheme.CDP: + return "CDP"; + default: + return "UNKNOWN"; + } +} + +export interface PolicySettings_Scheduling_BackupSet { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet"; + time?: PolicySettings_Scheduling_BackupSet_Time | undefined; + sinceLastExecTime?: + | PolicySettings_Scheduling_BackupSet_SinceLastExecTime + | undefined; +} + +export interface PolicySettings_Scheduling_BackupSet_Time { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.Time"; + /** Days in a week to perform a backup. */ + weekdays: PolicySettings_Day[]; + /** Time to repeat the backup. */ + repeatAt: PolicySettings_TimeOfDay[]; + /** Frequency of backup repetition. */ + repeatEvery?: PolicySettings_Interval; + /** The start time of the backup time interval. */ + timeFrom?: PolicySettings_TimeOfDay; + /** The end time of the backup time interval. */ + timeTo?: PolicySettings_TimeOfDay; + /** + * Days in a month to perform a backup. + * Allowed values are from 1 to 31. + */ + monthdays: number[]; + /** + * If set to true, last day of month will activate + * the policy. + */ + includeLastDayOfMonth: boolean; + /** Set of values. Allowed values form 1 to 12. 
*/ + months: number[]; + /** Possible types: `REPEATE_PERIOD_UNSPECIFIED`, `HOURLY`, `DAILY`, `WEEKLY`, `MONTHLY`. */ + type: PolicySettings_RepeatePeriod; +} + +export interface PolicySettings_Scheduling_BackupSet_SinceLastExecTime { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.SinceLastExecTime"; + /** The interval between backups. */ + delay?: PolicySettings_Interval; +} + +export interface PolicyApplication { + $type: "yandex.cloud.backup.v1.PolicyApplication"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + enabled: boolean; + status: PolicyApplication_Status; + createdAt?: Date; +} + +export enum PolicyApplication_Status { + STATUS_UNSPECIFIED = 0, + /** OK - Application is applied and everything is OK. */ + OK = 1, + /** RUNNING - Application is currently running. */ + RUNNING = 2, + /** DISABLED - Application is disabled. */ + DISABLED = 3, + UNRECOGNIZED = -1, +} + +export function policyApplication_StatusFromJSON( + object: any +): PolicyApplication_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return PolicyApplication_Status.STATUS_UNSPECIFIED; + case 1: + case "OK": + return PolicyApplication_Status.OK; + case 2: + case "RUNNING": + return PolicyApplication_Status.RUNNING; + case 3: + case "DISABLED": + return PolicyApplication_Status.DISABLED; + case -1: + case "UNRECOGNIZED": + default: + return PolicyApplication_Status.UNRECOGNIZED; + } +} + +export function policyApplication_StatusToJSON( + object: PolicyApplication_Status +): string { + switch (object) { + case PolicyApplication_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case PolicyApplication_Status.OK: + return "OK"; + case PolicyApplication_Status.RUNNING: + return "RUNNING"; + case PolicyApplication_Status.DISABLED: + return "DISABLED"; + default: + return "UNKNOWN"; + } +} + +const basePolicy: object = { + $type: "yandex.cloud.backup.v1.Policy", + id: "", + name: "", + 
enabled: false, + folderId: "", +}; + +export const Policy = { + $type: "yandex.cloud.backup.v1.Policy" as const, + + encode( + message: Policy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.enabled === true) { + writer.uint32(40).bool(message.enabled); + } + if (message.settings !== undefined) { + PolicySettings.encode( + message.settings, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.folderId !== "") { + writer.uint32(58).string(message.folderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Policy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePolicy } as Policy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.enabled = reader.bool(); + break; + case 6: + message.settings = PolicySettings.decode(reader, reader.uint32()); + break; + case 7: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Policy { + const message = { ...basePolicy } as Policy; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.settings = + object.settings !== undefined && object.settings !== null + ? PolicySettings.fromJSON(object.settings) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : ""; + return message; + }, + + toJSON(message: Policy): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.name !== undefined && (obj.name = message.name); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.enabled !== undefined && (obj.enabled = message.enabled); + message.settings !== undefined && + (obj.settings = message.settings + ? PolicySettings.toJSON(message.settings) + : undefined); + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial, I>>(object: I): Policy { + const message = { ...basePolicy } as Policy; + message.id = object.id ?? ""; + message.name = object.name ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.enabled = object.enabled ?? false; + message.settings = + object.settings !== undefined && object.settings !== null + ? PolicySettings.fromPartial(object.settings) + : undefined; + message.folderId = object.folderId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Policy.$type, Policy); + +const basePolicySettings: object = { + $type: "yandex.cloud.backup.v1.PolicySettings", + compression: 0, + format: 0, + multiVolumeSnapshottingEnabled: false, + preserveFileSecuritySettings: false, + silentModeEnabled: false, + cbt: 0, + fastBackupEnabled: false, + quiesceSnapshottingEnabled: false, +}; + +export const PolicySettings = { + $type: "yandex.cloud.backup.v1.PolicySettings" as const, + + encode( + message: PolicySettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.compression !== 0) { + writer.uint32(8).int32(message.compression); + } + if (message.format !== 0) { + writer.uint32(16).int32(message.format); + } + if (message.multiVolumeSnapshottingEnabled === true) { + writer.uint32(24).bool(message.multiVolumeSnapshottingEnabled); + } + if (message.preserveFileSecuritySettings === true) { + writer.uint32(32).bool(message.preserveFileSecuritySettings); + } + if (message.reattempts !== undefined) { + PolicySettings_RetriesConfiguration.encode( + message.reattempts, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.silentModeEnabled === true) { + writer.uint32(48).bool(message.silentModeEnabled); + } + if (message.splitting !== undefined) { + PolicySettings_Splitting.encode( + message.splitting, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.vmSnapshotReattempts !== undefined) { + PolicySettings_RetriesConfiguration.encode( + message.vmSnapshotReattempts, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vss !== undefined) { + PolicySettings_VolumeShadowCopyServiceSettings.encode( + message.vss, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.archive !== undefined) { + PolicySettings_ArchiveProperties.encode( + message.archive, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.performanceWindow !== undefined) { + PolicySettings_PerformanceWindow.encode( + message.performanceWindow, + 
writer.uint32(90).fork() + ).ldelim(); + } + if (message.retention !== undefined) { + PolicySettings_Retention.encode( + message.retention, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.scheduling !== undefined) { + PolicySettings_Scheduling.encode( + message.scheduling, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.cbt !== 0) { + writer.uint32(128).int32(message.cbt); + } + if (message.fastBackupEnabled === true) { + writer.uint32(136).bool(message.fastBackupEnabled); + } + if (message.quiesceSnapshottingEnabled === true) { + writer.uint32(144).bool(message.quiesceSnapshottingEnabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PolicySettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePolicySettings } as PolicySettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.compression = reader.int32() as any; + break; + case 2: + message.format = reader.int32() as any; + break; + case 3: + message.multiVolumeSnapshottingEnabled = reader.bool(); + break; + case 4: + message.preserveFileSecuritySettings = reader.bool(); + break; + case 5: + message.reattempts = PolicySettings_RetriesConfiguration.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.silentModeEnabled = reader.bool(); + break; + case 7: + message.splitting = PolicySettings_Splitting.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.vmSnapshotReattempts = + PolicySettings_RetriesConfiguration.decode(reader, reader.uint32()); + break; + case 9: + message.vss = PolicySettings_VolumeShadowCopyServiceSettings.decode( + reader, + reader.uint32() + ); + break; + case 10: + message.archive = PolicySettings_ArchiveProperties.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.performanceWindow = 
PolicySettings_PerformanceWindow.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.retention = PolicySettings_Retention.decode( + reader, + reader.uint32() + ); + break; + case 15: + message.scheduling = PolicySettings_Scheduling.decode( + reader, + reader.uint32() + ); + break; + case 16: + message.cbt = reader.int32() as any; + break; + case 17: + message.fastBackupEnabled = reader.bool(); + break; + case 18: + message.quiesceSnapshottingEnabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings { + const message = { ...basePolicySettings } as PolicySettings; + message.compression = + object.compression !== undefined && object.compression !== null + ? policySettings_CompressionFromJSON(object.compression) + : 0; + message.format = + object.format !== undefined && object.format !== null + ? formatFromJSON(object.format) + : 0; + message.multiVolumeSnapshottingEnabled = + object.multiVolumeSnapshottingEnabled !== undefined && + object.multiVolumeSnapshottingEnabled !== null + ? Boolean(object.multiVolumeSnapshottingEnabled) + : false; + message.preserveFileSecuritySettings = + object.preserveFileSecuritySettings !== undefined && + object.preserveFileSecuritySettings !== null + ? Boolean(object.preserveFileSecuritySettings) + : false; + message.reattempts = + object.reattempts !== undefined && object.reattempts !== null + ? PolicySettings_RetriesConfiguration.fromJSON(object.reattempts) + : undefined; + message.silentModeEnabled = + object.silentModeEnabled !== undefined && + object.silentModeEnabled !== null + ? Boolean(object.silentModeEnabled) + : false; + message.splitting = + object.splitting !== undefined && object.splitting !== null + ? PolicySettings_Splitting.fromJSON(object.splitting) + : undefined; + message.vmSnapshotReattempts = + object.vmSnapshotReattempts !== undefined && + object.vmSnapshotReattempts !== null + ? 
PolicySettings_RetriesConfiguration.fromJSON( + object.vmSnapshotReattempts + ) + : undefined; + message.vss = + object.vss !== undefined && object.vss !== null + ? PolicySettings_VolumeShadowCopyServiceSettings.fromJSON(object.vss) + : undefined; + message.archive = + object.archive !== undefined && object.archive !== null + ? PolicySettings_ArchiveProperties.fromJSON(object.archive) + : undefined; + message.performanceWindow = + object.performanceWindow !== undefined && + object.performanceWindow !== null + ? PolicySettings_PerformanceWindow.fromJSON(object.performanceWindow) + : undefined; + message.retention = + object.retention !== undefined && object.retention !== null + ? PolicySettings_Retention.fromJSON(object.retention) + : undefined; + message.scheduling = + object.scheduling !== undefined && object.scheduling !== null + ? PolicySettings_Scheduling.fromJSON(object.scheduling) + : undefined; + message.cbt = + object.cbt !== undefined && object.cbt !== null + ? policySettings_ChangedBlockTrackingFromJSON(object.cbt) + : 0; + message.fastBackupEnabled = + object.fastBackupEnabled !== undefined && + object.fastBackupEnabled !== null + ? Boolean(object.fastBackupEnabled) + : false; + message.quiesceSnapshottingEnabled = + object.quiesceSnapshottingEnabled !== undefined && + object.quiesceSnapshottingEnabled !== null + ? 
Boolean(object.quiesceSnapshottingEnabled) + : false; + return message; + }, + + toJSON(message: PolicySettings): unknown { + const obj: any = {}; + message.compression !== undefined && + (obj.compression = policySettings_CompressionToJSON(message.compression)); + message.format !== undefined && (obj.format = formatToJSON(message.format)); + message.multiVolumeSnapshottingEnabled !== undefined && + (obj.multiVolumeSnapshottingEnabled = + message.multiVolumeSnapshottingEnabled); + message.preserveFileSecuritySettings !== undefined && + (obj.preserveFileSecuritySettings = message.preserveFileSecuritySettings); + message.reattempts !== undefined && + (obj.reattempts = message.reattempts + ? PolicySettings_RetriesConfiguration.toJSON(message.reattempts) + : undefined); + message.silentModeEnabled !== undefined && + (obj.silentModeEnabled = message.silentModeEnabled); + message.splitting !== undefined && + (obj.splitting = message.splitting + ? PolicySettings_Splitting.toJSON(message.splitting) + : undefined); + message.vmSnapshotReattempts !== undefined && + (obj.vmSnapshotReattempts = message.vmSnapshotReattempts + ? PolicySettings_RetriesConfiguration.toJSON( + message.vmSnapshotReattempts + ) + : undefined); + message.vss !== undefined && + (obj.vss = message.vss + ? PolicySettings_VolumeShadowCopyServiceSettings.toJSON(message.vss) + : undefined); + message.archive !== undefined && + (obj.archive = message.archive + ? PolicySettings_ArchiveProperties.toJSON(message.archive) + : undefined); + message.performanceWindow !== undefined && + (obj.performanceWindow = message.performanceWindow + ? PolicySettings_PerformanceWindow.toJSON(message.performanceWindow) + : undefined); + message.retention !== undefined && + (obj.retention = message.retention + ? PolicySettings_Retention.toJSON(message.retention) + : undefined); + message.scheduling !== undefined && + (obj.scheduling = message.scheduling + ? 
PolicySettings_Scheduling.toJSON(message.scheduling) + : undefined); + message.cbt !== undefined && + (obj.cbt = policySettings_ChangedBlockTrackingToJSON(message.cbt)); + message.fastBackupEnabled !== undefined && + (obj.fastBackupEnabled = message.fastBackupEnabled); + message.quiesceSnapshottingEnabled !== undefined && + (obj.quiesceSnapshottingEnabled = message.quiesceSnapshottingEnabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings { + const message = { ...basePolicySettings } as PolicySettings; + message.compression = object.compression ?? 0; + message.format = object.format ?? 0; + message.multiVolumeSnapshottingEnabled = + object.multiVolumeSnapshottingEnabled ?? false; + message.preserveFileSecuritySettings = + object.preserveFileSecuritySettings ?? false; + message.reattempts = + object.reattempts !== undefined && object.reattempts !== null + ? PolicySettings_RetriesConfiguration.fromPartial(object.reattempts) + : undefined; + message.silentModeEnabled = object.silentModeEnabled ?? false; + message.splitting = + object.splitting !== undefined && object.splitting !== null + ? PolicySettings_Splitting.fromPartial(object.splitting) + : undefined; + message.vmSnapshotReattempts = + object.vmSnapshotReattempts !== undefined && + object.vmSnapshotReattempts !== null + ? PolicySettings_RetriesConfiguration.fromPartial( + object.vmSnapshotReattempts + ) + : undefined; + message.vss = + object.vss !== undefined && object.vss !== null + ? PolicySettings_VolumeShadowCopyServiceSettings.fromPartial(object.vss) + : undefined; + message.archive = + object.archive !== undefined && object.archive !== null + ? PolicySettings_ArchiveProperties.fromPartial(object.archive) + : undefined; + message.performanceWindow = + object.performanceWindow !== undefined && + object.performanceWindow !== null + ? 
PolicySettings_PerformanceWindow.fromPartial(object.performanceWindow) + : undefined; + message.retention = + object.retention !== undefined && object.retention !== null + ? PolicySettings_Retention.fromPartial(object.retention) + : undefined; + message.scheduling = + object.scheduling !== undefined && object.scheduling !== null + ? PolicySettings_Scheduling.fromPartial(object.scheduling) + : undefined; + message.cbt = object.cbt ?? 0; + message.fastBackupEnabled = object.fastBackupEnabled ?? false; + message.quiesceSnapshottingEnabled = + object.quiesceSnapshottingEnabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(PolicySettings.$type, PolicySettings); + +const basePolicySettings_Interval: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Interval", + type: 0, + count: 0, +}; + +export const PolicySettings_Interval = { + $type: "yandex.cloud.backup.v1.PolicySettings.Interval" as const, + + encode( + message: PolicySettings_Interval, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.count !== 0) { + writer.uint32(16).int64(message.count); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Interval { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Interval, + } as PolicySettings_Interval; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.count = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Interval { + const message = { + ...basePolicySettings_Interval, + } as PolicySettings_Interval; + message.type = + object.type !== undefined && object.type !== null + ? policySettings_Interval_TypeFromJSON(object.type) + : 0; + message.count = + object.count !== undefined && object.count !== null + ? Number(object.count) + : 0; + return message; + }, + + toJSON(message: PolicySettings_Interval): unknown { + const obj: any = {}; + message.type !== undefined && + (obj.type = policySettings_Interval_TypeToJSON(message.type)); + message.count !== undefined && (obj.count = Math.round(message.count)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings_Interval { + const message = { + ...basePolicySettings_Interval, + } as PolicySettings_Interval; + message.type = object.type ?? 0; + message.count = object.count ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(PolicySettings_Interval.$type, PolicySettings_Interval); + +const basePolicySettings_RetriesConfiguration: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.RetriesConfiguration", + enabled: false, + maxAttempts: 0, +}; + +export const PolicySettings_RetriesConfiguration = { + $type: "yandex.cloud.backup.v1.PolicySettings.RetriesConfiguration" as const, + + encode( + message: PolicySettings_RetriesConfiguration, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + if (message.interval !== undefined) { + PolicySettings_Interval.encode( + message.interval, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.maxAttempts !== 0) { + writer.uint32(24).int64(message.maxAttempts); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_RetriesConfiguration { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_RetriesConfiguration, + } as PolicySettings_RetriesConfiguration; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + case 2: + message.interval = PolicySettings_Interval.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.maxAttempts = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_RetriesConfiguration { + const message = { + ...basePolicySettings_RetriesConfiguration, + } as PolicySettings_RetriesConfiguration; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? 
Boolean(object.enabled) + : false; + message.interval = + object.interval !== undefined && object.interval !== null + ? PolicySettings_Interval.fromJSON(object.interval) + : undefined; + message.maxAttempts = + object.maxAttempts !== undefined && object.maxAttempts !== null + ? Number(object.maxAttempts) + : 0; + return message; + }, + + toJSON(message: PolicySettings_RetriesConfiguration): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + message.interval !== undefined && + (obj.interval = message.interval + ? PolicySettings_Interval.toJSON(message.interval) + : undefined); + message.maxAttempts !== undefined && + (obj.maxAttempts = Math.round(message.maxAttempts)); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_RetriesConfiguration { + const message = { + ...basePolicySettings_RetriesConfiguration, + } as PolicySettings_RetriesConfiguration; + message.enabled = object.enabled ?? false; + message.interval = + object.interval !== undefined && object.interval !== null + ? PolicySettings_Interval.fromPartial(object.interval) + : undefined; + message.maxAttempts = object.maxAttempts ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_RetriesConfiguration.$type, + PolicySettings_RetriesConfiguration +); + +const basePolicySettings_Splitting: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Splitting", + size: 0, +}; + +export const PolicySettings_Splitting = { + $type: "yandex.cloud.backup.v1.PolicySettings.Splitting" as const, + + encode( + message: PolicySettings_Splitting, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.size !== 0) { + writer.uint32(8).int64(message.size); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Splitting { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Splitting, + } as PolicySettings_Splitting; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Splitting { + const message = { + ...basePolicySettings_Splitting, + } as PolicySettings_Splitting; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + return message; + }, + + toJSON(message: PolicySettings_Splitting): unknown { + const obj: any = {}; + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings_Splitting { + const message = { + ...basePolicySettings_Splitting, + } as PolicySettings_Splitting; + message.size = object.size ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Splitting.$type, + PolicySettings_Splitting +); + +const basePolicySettings_VolumeShadowCopyServiceSettings: object = { + $type: + "yandex.cloud.backup.v1.PolicySettings.VolumeShadowCopyServiceSettings", + enabled: false, + provider: 0, +}; + +export const PolicySettings_VolumeShadowCopyServiceSettings = { + $type: + "yandex.cloud.backup.v1.PolicySettings.VolumeShadowCopyServiceSettings" as const, + + encode( + message: PolicySettings_VolumeShadowCopyServiceSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + if (message.provider !== 0) { + writer.uint32(16).int32(message.provider); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_VolumeShadowCopyServiceSettings { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_VolumeShadowCopyServiceSettings, + } as PolicySettings_VolumeShadowCopyServiceSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + case 2: + message.provider = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_VolumeShadowCopyServiceSettings { + const message = { + ...basePolicySettings_VolumeShadowCopyServiceSettings, + } as PolicySettings_VolumeShadowCopyServiceSettings; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.provider = + object.provider !== undefined && object.provider !== null + ? policySettings_VolumeShadowCopyServiceSettings_VSSProviderFromJSON( + object.provider + ) + : 0; + return message; + }, + + toJSON(message: PolicySettings_VolumeShadowCopyServiceSettings): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + message.provider !== undefined && + (obj.provider = + policySettings_VolumeShadowCopyServiceSettings_VSSProviderToJSON( + message.provider + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): PolicySettings_VolumeShadowCopyServiceSettings { + const message = { + ...basePolicySettings_VolumeShadowCopyServiceSettings, + } as PolicySettings_VolumeShadowCopyServiceSettings; + message.enabled = object.enabled ?? false; + message.provider = object.provider ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_VolumeShadowCopyServiceSettings.$type, + PolicySettings_VolumeShadowCopyServiceSettings +); + +const basePolicySettings_ArchiveProperties: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.ArchiveProperties", + name: "", +}; + +export const PolicySettings_ArchiveProperties = { + $type: "yandex.cloud.backup.v1.PolicySettings.ArchiveProperties" as const, + + encode( + message: PolicySettings_ArchiveProperties, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_ArchiveProperties { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_ArchiveProperties, + } as PolicySettings_ArchiveProperties; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_ArchiveProperties { + const message = { + ...basePolicySettings_ArchiveProperties, + } as PolicySettings_ArchiveProperties; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: PolicySettings_ArchiveProperties): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_ArchiveProperties { + const message = { + ...basePolicySettings_ArchiveProperties, + } as PolicySettings_ArchiveProperties; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_ArchiveProperties.$type, + PolicySettings_ArchiveProperties +); + +const basePolicySettings_PerformanceWindow: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.PerformanceWindow", + enabled: false, +}; + +export const PolicySettings_PerformanceWindow = { + $type: "yandex.cloud.backup.v1.PolicySettings.PerformanceWindow" as const, + + encode( + message: PolicySettings_PerformanceWindow, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_PerformanceWindow { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_PerformanceWindow, + } as PolicySettings_PerformanceWindow; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_PerformanceWindow { + const message = { + ...basePolicySettings_PerformanceWindow, + } as PolicySettings_PerformanceWindow; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + return message; + }, + + toJSON(message: PolicySettings_PerformanceWindow): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_PerformanceWindow { + const message = { + ...basePolicySettings_PerformanceWindow, + } as PolicySettings_PerformanceWindow; + message.enabled = object.enabled ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_PerformanceWindow.$type, + PolicySettings_PerformanceWindow +); + +const basePolicySettings_TimeOfDay: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.TimeOfDay", + hour: 0, + minute: 0, +}; + +export const PolicySettings_TimeOfDay = { + $type: "yandex.cloud.backup.v1.PolicySettings.TimeOfDay" as const, + + encode( + message: PolicySettings_TimeOfDay, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hour !== 0) { + writer.uint32(8).int64(message.hour); + } + if (message.minute !== 0) { + writer.uint32(16).int64(message.minute); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_TimeOfDay { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_TimeOfDay, + } as PolicySettings_TimeOfDay; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hour = longToNumber(reader.int64() as Long); + break; + case 2: + message.minute = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_TimeOfDay { + const message = { + ...basePolicySettings_TimeOfDay, + } as PolicySettings_TimeOfDay; + message.hour = + object.hour !== undefined && object.hour !== null + ? Number(object.hour) + : 0; + message.minute = + object.minute !== undefined && object.minute !== null + ? 
Number(object.minute) + : 0; + return message; + }, + + toJSON(message: PolicySettings_TimeOfDay): unknown { + const obj: any = {}; + message.hour !== undefined && (obj.hour = Math.round(message.hour)); + message.minute !== undefined && (obj.minute = Math.round(message.minute)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings_TimeOfDay { + const message = { + ...basePolicySettings_TimeOfDay, + } as PolicySettings_TimeOfDay; + message.hour = object.hour ?? 0; + message.minute = object.minute ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_TimeOfDay.$type, + PolicySettings_TimeOfDay +); + +const basePolicySettings_Retention: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Retention", + afterBackup: false, +}; + +export const PolicySettings_Retention = { + $type: "yandex.cloud.backup.v1.PolicySettings.Retention" as const, + + encode( + message: PolicySettings_Retention, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.rules) { + PolicySettings_Retention_RetentionRule.encode( + v!, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.afterBackup === true) { + writer.uint32(16).bool(message.afterBackup); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Retention { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Retention, + } as PolicySettings_Retention; + message.rules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rules.push( + PolicySettings_Retention_RetentionRule.decode( + reader, + reader.uint32() + ) + ); + break; + case 2: + message.afterBackup = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Retention { + const message = { + ...basePolicySettings_Retention, + } as PolicySettings_Retention; + message.rules = (object.rules ?? []).map((e: any) => + PolicySettings_Retention_RetentionRule.fromJSON(e) + ); + message.afterBackup = + object.afterBackup !== undefined && object.afterBackup !== null + ? Boolean(object.afterBackup) + : false; + return message; + }, + + toJSON(message: PolicySettings_Retention): unknown { + const obj: any = {}; + if (message.rules) { + obj.rules = message.rules.map((e) => + e ? PolicySettings_Retention_RetentionRule.toJSON(e) : undefined + ); + } else { + obj.rules = []; + } + message.afterBackup !== undefined && + (obj.afterBackup = message.afterBackup); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings_Retention { + const message = { + ...basePolicySettings_Retention, + } as PolicySettings_Retention; + message.rules = + object.rules?.map((e) => + PolicySettings_Retention_RetentionRule.fromPartial(e) + ) || []; + message.afterBackup = object.afterBackup ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Retention.$type, + PolicySettings_Retention +); + +const basePolicySettings_Retention_RetentionRule: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Retention.RetentionRule", + backupSet: 0, +}; + +export const PolicySettings_Retention_RetentionRule = { + $type: + "yandex.cloud.backup.v1.PolicySettings.Retention.RetentionRule" as const, + + encode( + message: PolicySettings_Retention_RetentionRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + writer.uint32(10).fork(); + for (const v of message.backupSet) { + writer.int32(v); + } + writer.ldelim(); + if (message.maxAge !== undefined) { + PolicySettings_Interval.encode( + message.maxAge, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.maxCount !== undefined) { + writer.uint32(24).int64(message.maxCount); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Retention_RetentionRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Retention_RetentionRule, + } as PolicySettings_Retention_RetentionRule; + message.backupSet = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.backupSet.push(reader.int32() as any); + } + } else { + message.backupSet.push(reader.int32() as any); + } + break; + case 2: + message.maxAge = PolicySettings_Interval.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.maxCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Retention_RetentionRule { + const message = { + ...basePolicySettings_Retention_RetentionRule, + } as PolicySettings_Retention_RetentionRule; + message.backupSet = (object.backupSet ?? []).map((e: any) => + policySettings_RepeatePeriodFromJSON(e) + ); + message.maxAge = + object.maxAge !== undefined && object.maxAge !== null + ? PolicySettings_Interval.fromJSON(object.maxAge) + : undefined; + message.maxCount = + object.maxCount !== undefined && object.maxCount !== null + ? Number(object.maxCount) + : undefined; + return message; + }, + + toJSON(message: PolicySettings_Retention_RetentionRule): unknown { + const obj: any = {}; + if (message.backupSet) { + obj.backupSet = message.backupSet.map((e) => + policySettings_RepeatePeriodToJSON(e) + ); + } else { + obj.backupSet = []; + } + message.maxAge !== undefined && + (obj.maxAge = message.maxAge + ? 
PolicySettings_Interval.toJSON(message.maxAge) + : undefined); + message.maxCount !== undefined && + (obj.maxCount = Math.round(message.maxCount)); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_Retention_RetentionRule { + const message = { + ...basePolicySettings_Retention_RetentionRule, + } as PolicySettings_Retention_RetentionRule; + message.backupSet = object.backupSet?.map((e) => e) || []; + message.maxAge = + object.maxAge !== undefined && object.maxAge !== null + ? PolicySettings_Interval.fromPartial(object.maxAge) + : undefined; + message.maxCount = object.maxCount ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Retention_RetentionRule.$type, + PolicySettings_Retention_RetentionRule +); + +const basePolicySettings_Scheduling: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling", + enabled: false, + maxParallelBackups: 0, + scheme: 0, + weeklyBackupDay: 0, +}; + +export const PolicySettings_Scheduling = { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling" as const, + + encode( + message: PolicySettings_Scheduling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backupSets) { + PolicySettings_Scheduling_BackupSet.encode( + v!, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.enabled === true) { + writer.uint32(16).bool(message.enabled); + } + if (message.maxParallelBackups !== 0) { + writer.uint32(24).int64(message.maxParallelBackups); + } + if (message.randMaxDelay !== undefined) { + PolicySettings_Interval.encode( + message.randMaxDelay, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.scheme !== 0) { + writer.uint32(40).int32(message.scheme); + } + if (message.weeklyBackupDay !== 0) { + writer.uint32(48).int32(message.weeklyBackupDay); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Scheduling { + const reader = input instanceof 
_m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Scheduling, + } as PolicySettings_Scheduling; + message.backupSets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupSets.push( + PolicySettings_Scheduling_BackupSet.decode(reader, reader.uint32()) + ); + break; + case 2: + message.enabled = reader.bool(); + break; + case 3: + message.maxParallelBackups = longToNumber(reader.int64() as Long); + break; + case 4: + message.randMaxDelay = PolicySettings_Interval.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.scheme = reader.int32() as any; + break; + case 6: + message.weeklyBackupDay = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Scheduling { + const message = { + ...basePolicySettings_Scheduling, + } as PolicySettings_Scheduling; + message.backupSets = (object.backupSets ?? []).map((e: any) => + PolicySettings_Scheduling_BackupSet.fromJSON(e) + ); + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.maxParallelBackups = + object.maxParallelBackups !== undefined && + object.maxParallelBackups !== null + ? Number(object.maxParallelBackups) + : 0; + message.randMaxDelay = + object.randMaxDelay !== undefined && object.randMaxDelay !== null + ? PolicySettings_Interval.fromJSON(object.randMaxDelay) + : undefined; + message.scheme = + object.scheme !== undefined && object.scheme !== null + ? policySettings_Scheduling_SchemeFromJSON(object.scheme) + : 0; + message.weeklyBackupDay = + object.weeklyBackupDay !== undefined && object.weeklyBackupDay !== null + ? 
policySettings_DayFromJSON(object.weeklyBackupDay) + : 0; + return message; + }, + + toJSON(message: PolicySettings_Scheduling): unknown { + const obj: any = {}; + if (message.backupSets) { + obj.backupSets = message.backupSets.map((e) => + e ? PolicySettings_Scheduling_BackupSet.toJSON(e) : undefined + ); + } else { + obj.backupSets = []; + } + message.enabled !== undefined && (obj.enabled = message.enabled); + message.maxParallelBackups !== undefined && + (obj.maxParallelBackups = Math.round(message.maxParallelBackups)); + message.randMaxDelay !== undefined && + (obj.randMaxDelay = message.randMaxDelay + ? PolicySettings_Interval.toJSON(message.randMaxDelay) + : undefined); + message.scheme !== undefined && + (obj.scheme = policySettings_Scheduling_SchemeToJSON(message.scheme)); + message.weeklyBackupDay !== undefined && + (obj.weeklyBackupDay = policySettings_DayToJSON(message.weeklyBackupDay)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicySettings_Scheduling { + const message = { + ...basePolicySettings_Scheduling, + } as PolicySettings_Scheduling; + message.backupSets = + object.backupSets?.map((e) => + PolicySettings_Scheduling_BackupSet.fromPartial(e) + ) || []; + message.enabled = object.enabled ?? false; + message.maxParallelBackups = object.maxParallelBackups ?? 0; + message.randMaxDelay = + object.randMaxDelay !== undefined && object.randMaxDelay !== null + ? PolicySettings_Interval.fromPartial(object.randMaxDelay) + : undefined; + message.scheme = object.scheme ?? 0; + message.weeklyBackupDay = object.weeklyBackupDay ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Scheduling.$type, + PolicySettings_Scheduling +); + +const basePolicySettings_Scheduling_BackupSet: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet", +}; + +export const PolicySettings_Scheduling_BackupSet = { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet" as const, + + encode( + message: PolicySettings_Scheduling_BackupSet, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.time !== undefined) { + PolicySettings_Scheduling_BackupSet_Time.encode( + message.time, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sinceLastExecTime !== undefined) { + PolicySettings_Scheduling_BackupSet_SinceLastExecTime.encode( + message.sinceLastExecTime, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Scheduling_BackupSet { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Scheduling_BackupSet, + } as PolicySettings_Scheduling_BackupSet; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.time = PolicySettings_Scheduling_BackupSet_Time.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.sinceLastExecTime = + PolicySettings_Scheduling_BackupSet_SinceLastExecTime.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Scheduling_BackupSet { + const message = { + ...basePolicySettings_Scheduling_BackupSet, + } as PolicySettings_Scheduling_BackupSet; + message.time = + object.time !== undefined && object.time !== null + ? 
PolicySettings_Scheduling_BackupSet_Time.fromJSON(object.time) + : undefined; + message.sinceLastExecTime = + object.sinceLastExecTime !== undefined && + object.sinceLastExecTime !== null + ? PolicySettings_Scheduling_BackupSet_SinceLastExecTime.fromJSON( + object.sinceLastExecTime + ) + : undefined; + return message; + }, + + toJSON(message: PolicySettings_Scheduling_BackupSet): unknown { + const obj: any = {}; + message.time !== undefined && + (obj.time = message.time + ? PolicySettings_Scheduling_BackupSet_Time.toJSON(message.time) + : undefined); + message.sinceLastExecTime !== undefined && + (obj.sinceLastExecTime = message.sinceLastExecTime + ? PolicySettings_Scheduling_BackupSet_SinceLastExecTime.toJSON( + message.sinceLastExecTime + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_Scheduling_BackupSet { + const message = { + ...basePolicySettings_Scheduling_BackupSet, + } as PolicySettings_Scheduling_BackupSet; + message.time = + object.time !== undefined && object.time !== null + ? PolicySettings_Scheduling_BackupSet_Time.fromPartial(object.time) + : undefined; + message.sinceLastExecTime = + object.sinceLastExecTime !== undefined && + object.sinceLastExecTime !== null + ? 
PolicySettings_Scheduling_BackupSet_SinceLastExecTime.fromPartial( + object.sinceLastExecTime + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Scheduling_BackupSet.$type, + PolicySettings_Scheduling_BackupSet +); + +const basePolicySettings_Scheduling_BackupSet_Time: object = { + $type: "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.Time", + weekdays: 0, + monthdays: 0, + includeLastDayOfMonth: false, + months: 0, + type: 0, +}; + +export const PolicySettings_Scheduling_BackupSet_Time = { + $type: + "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.Time" as const, + + encode( + message: PolicySettings_Scheduling_BackupSet_Time, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + writer.uint32(10).fork(); + for (const v of message.weekdays) { + writer.int32(v); + } + writer.ldelim(); + for (const v of message.repeatAt) { + PolicySettings_TimeOfDay.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.repeatEvery !== undefined) { + PolicySettings_Interval.encode( + message.repeatEvery, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.timeFrom !== undefined) { + PolicySettings_TimeOfDay.encode( + message.timeFrom, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.timeTo !== undefined) { + PolicySettings_TimeOfDay.encode( + message.timeTo, + writer.uint32(42).fork() + ).ldelim(); + } + writer.uint32(50).fork(); + for (const v of message.monthdays) { + writer.int64(v); + } + writer.ldelim(); + if (message.includeLastDayOfMonth === true) { + writer.uint32(56).bool(message.includeLastDayOfMonth); + } + writer.uint32(66).fork(); + for (const v of message.months) { + writer.int64(v); + } + writer.ldelim(); + if (message.type !== 0) { + writer.uint32(72).int32(message.type); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Scheduling_BackupSet_Time { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Scheduling_BackupSet_Time, + } as PolicySettings_Scheduling_BackupSet_Time; + message.weekdays = []; + message.repeatAt = []; + message.monthdays = []; + message.months = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.weekdays.push(reader.int32() as any); + } + } else { + message.weekdays.push(reader.int32() as any); + } + break; + case 2: + message.repeatAt.push( + PolicySettings_TimeOfDay.decode(reader, reader.uint32()) + ); + break; + case 3: + message.repeatEvery = PolicySettings_Interval.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.timeFrom = PolicySettings_TimeOfDay.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.timeTo = PolicySettings_TimeOfDay.decode( + reader, + reader.uint32() + ); + break; + case 6: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.monthdays.push(longToNumber(reader.int64() as Long)); + } + } else { + message.monthdays.push(longToNumber(reader.int64() as Long)); + } + break; + case 7: + message.includeLastDayOfMonth = reader.bool(); + break; + case 8: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.months.push(longToNumber(reader.int64() as Long)); + } + } else { + message.months.push(longToNumber(reader.int64() as Long)); + } + break; + case 9: + message.type = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Scheduling_BackupSet_Time { + const message = { + ...basePolicySettings_Scheduling_BackupSet_Time, + } as PolicySettings_Scheduling_BackupSet_Time; + 
message.weekdays = (object.weekdays ?? []).map((e: any) => + policySettings_DayFromJSON(e) + ); + message.repeatAt = (object.repeatAt ?? []).map((e: any) => + PolicySettings_TimeOfDay.fromJSON(e) + ); + message.repeatEvery = + object.repeatEvery !== undefined && object.repeatEvery !== null + ? PolicySettings_Interval.fromJSON(object.repeatEvery) + : undefined; + message.timeFrom = + object.timeFrom !== undefined && object.timeFrom !== null + ? PolicySettings_TimeOfDay.fromJSON(object.timeFrom) + : undefined; + message.timeTo = + object.timeTo !== undefined && object.timeTo !== null + ? PolicySettings_TimeOfDay.fromJSON(object.timeTo) + : undefined; + message.monthdays = (object.monthdays ?? []).map((e: any) => Number(e)); + message.includeLastDayOfMonth = + object.includeLastDayOfMonth !== undefined && + object.includeLastDayOfMonth !== null + ? Boolean(object.includeLastDayOfMonth) + : false; + message.months = (object.months ?? []).map((e: any) => Number(e)); + message.type = + object.type !== undefined && object.type !== null + ? policySettings_RepeatePeriodFromJSON(object.type) + : 0; + return message; + }, + + toJSON(message: PolicySettings_Scheduling_BackupSet_Time): unknown { + const obj: any = {}; + if (message.weekdays) { + obj.weekdays = message.weekdays.map((e) => policySettings_DayToJSON(e)); + } else { + obj.weekdays = []; + } + if (message.repeatAt) { + obj.repeatAt = message.repeatAt.map((e) => + e ? PolicySettings_TimeOfDay.toJSON(e) : undefined + ); + } else { + obj.repeatAt = []; + } + message.repeatEvery !== undefined && + (obj.repeatEvery = message.repeatEvery + ? PolicySettings_Interval.toJSON(message.repeatEvery) + : undefined); + message.timeFrom !== undefined && + (obj.timeFrom = message.timeFrom + ? PolicySettings_TimeOfDay.toJSON(message.timeFrom) + : undefined); + message.timeTo !== undefined && + (obj.timeTo = message.timeTo + ? 
PolicySettings_TimeOfDay.toJSON(message.timeTo) + : undefined); + if (message.monthdays) { + obj.monthdays = message.monthdays.map((e) => Math.round(e)); + } else { + obj.monthdays = []; + } + message.includeLastDayOfMonth !== undefined && + (obj.includeLastDayOfMonth = message.includeLastDayOfMonth); + if (message.months) { + obj.months = message.months.map((e) => Math.round(e)); + } else { + obj.months = []; + } + message.type !== undefined && + (obj.type = policySettings_RepeatePeriodToJSON(message.type)); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): PolicySettings_Scheduling_BackupSet_Time { + const message = { + ...basePolicySettings_Scheduling_BackupSet_Time, + } as PolicySettings_Scheduling_BackupSet_Time; + message.weekdays = object.weekdays?.map((e) => e) || []; + message.repeatAt = + object.repeatAt?.map((e) => PolicySettings_TimeOfDay.fromPartial(e)) || + []; + message.repeatEvery = + object.repeatEvery !== undefined && object.repeatEvery !== null + ? PolicySettings_Interval.fromPartial(object.repeatEvery) + : undefined; + message.timeFrom = + object.timeFrom !== undefined && object.timeFrom !== null + ? PolicySettings_TimeOfDay.fromPartial(object.timeFrom) + : undefined; + message.timeTo = + object.timeTo !== undefined && object.timeTo !== null + ? PolicySettings_TimeOfDay.fromPartial(object.timeTo) + : undefined; + message.monthdays = object.monthdays?.map((e) => e) || []; + message.includeLastDayOfMonth = object.includeLastDayOfMonth ?? false; + message.months = object.months?.map((e) => e) || []; + message.type = object.type ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Scheduling_BackupSet_Time.$type, + PolicySettings_Scheduling_BackupSet_Time +); + +const basePolicySettings_Scheduling_BackupSet_SinceLastExecTime: object = { + $type: + "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.SinceLastExecTime", +}; + +export const PolicySettings_Scheduling_BackupSet_SinceLastExecTime = { + $type: + "yandex.cloud.backup.v1.PolicySettings.Scheduling.BackupSet.SinceLastExecTime" as const, + + encode( + message: PolicySettings_Scheduling_BackupSet_SinceLastExecTime, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.delay !== undefined) { + PolicySettings_Interval.encode( + message.delay, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PolicySettings_Scheduling_BackupSet_SinceLastExecTime { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePolicySettings_Scheduling_BackupSet_SinceLastExecTime, + } as PolicySettings_Scheduling_BackupSet_SinceLastExecTime; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.delay = PolicySettings_Interval.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicySettings_Scheduling_BackupSet_SinceLastExecTime { + const message = { + ...basePolicySettings_Scheduling_BackupSet_SinceLastExecTime, + } as PolicySettings_Scheduling_BackupSet_SinceLastExecTime; + message.delay = + object.delay !== undefined && object.delay !== null + ? 
PolicySettings_Interval.fromJSON(object.delay) + : undefined; + return message; + }, + + toJSON( + message: PolicySettings_Scheduling_BackupSet_SinceLastExecTime + ): unknown { + const obj: any = {}; + message.delay !== undefined && + (obj.delay = message.delay + ? PolicySettings_Interval.toJSON(message.delay) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): PolicySettings_Scheduling_BackupSet_SinceLastExecTime { + const message = { + ...basePolicySettings_Scheduling_BackupSet_SinceLastExecTime, + } as PolicySettings_Scheduling_BackupSet_SinceLastExecTime; + message.delay = + object.delay !== undefined && object.delay !== null + ? PolicySettings_Interval.fromPartial(object.delay) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + PolicySettings_Scheduling_BackupSet_SinceLastExecTime.$type, + PolicySettings_Scheduling_BackupSet_SinceLastExecTime +); + +const basePolicyApplication: object = { + $type: "yandex.cloud.backup.v1.PolicyApplication", + policyId: "", + computeInstanceId: "", + enabled: false, + status: 0, +}; + +export const PolicyApplication = { + $type: "yandex.cloud.backup.v1.PolicyApplication" as const, + + encode( + message: PolicyApplication, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + if (message.enabled === true) { + writer.uint32(24).bool(message.enabled); + } + if (message.status !== 0) { + writer.uint32(32).int32(message.status); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PolicyApplication { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePolicyApplication } as PolicyApplication; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + case 3: + message.enabled = reader.bool(); + break; + case 4: + message.status = reader.int32() as any; + break; + case 5: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PolicyApplication { + const message = { ...basePolicyApplication } as PolicyApplication; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.status = + object.status !== undefined && object.status !== null + ? policyApplication_StatusFromJSON(object.status) + : 0; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
fromJsonTimestamp(object.createdAt) + : undefined; + return message; + }, + + toJSON(message: PolicyApplication): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.enabled !== undefined && (obj.enabled = message.enabled); + message.status !== undefined && + (obj.status = policyApplication_StatusToJSON(message.status)); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): PolicyApplication { + const message = { ...basePolicyApplication } as PolicyApplication; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.enabled = object.enabled ?? false; + message.status = object.status ?? 0; + message.createdAt = object.createdAt ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(PolicyApplication.$type, PolicyApplication); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/policy_service.ts b/src/generated/yandex/cloud/backup/v1/policy_service.ts new file mode 100644 index 00000000..3fd782c4 --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/policy_service.ts @@ -0,0 +1,1802 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + PolicySettings, + Policy, + PolicyApplication, +} from "../../../../yandex/cloud/backup/v1/policy"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +export interface ListPoliciesRequest { + $type: "yandex.cloud.backup.v1.ListPoliciesRequest"; + /** Folder ID. 
Either Folder ID or Compute Cloud instance ID should be set. */ + folderId: string; + /** Compute Cloud instance ID. Either Folder ID or Compute Cloud instance ID should be set. */ + computeInstanceId: string; +} + +export interface ListPoliciesResponse { + $type: "yandex.cloud.backup.v1.ListPoliciesResponse"; + policies: Policy[]; +} + +export interface CreatePolicyRequest { + $type: "yandex.cloud.backup.v1.CreatePolicyRequest"; + /** Folder ID. */ + folderId: string; + /** Policy name. */ + name: string; + settings?: PolicySettings; +} + +export interface CreatePolicyMetadata { + $type: "yandex.cloud.backup.v1.CreatePolicyMetadata"; + /** Policy ID. */ + policyId: string; +} + +export interface GetPolicyRequest { + $type: "yandex.cloud.backup.v1.GetPolicyRequest"; + /** Policy ID. */ + policyId: string; +} + +export interface UpdatePolicyRequest { + $type: "yandex.cloud.backup.v1.UpdatePolicyRequest"; + /** Policy ID. */ + policyId: string; + settings?: PolicySettings; +} + +export interface UpdatePolicyMetadata { + $type: "yandex.cloud.backup.v1.UpdatePolicyMetadata"; + /** Policy ID. */ + policyId: string; +} + +export interface DeletePolicyRequest { + $type: "yandex.cloud.backup.v1.DeletePolicyRequest"; + /** Policy ID. */ + policyId: string; +} + +export interface DeletePolicyMetadata { + $type: "yandex.cloud.backup.v1.DeletePolicyMetadata"; + /** Policy ID. */ + policyId: string; +} + +export interface ApplyPolicyRequest { + $type: "yandex.cloud.backup.v1.ApplyPolicyRequest"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface ApplyPolicyMetadata { + $type: "yandex.cloud.backup.v1.ApplyPolicyMetadata"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface ListApplicationsRequest { + $type: "yandex.cloud.backup.v1.ListApplicationsRequest"; + /** Folder ID. */ + folderId: string | undefined; + /** Policy ID. 
*/ + policyId: string | undefined; + /** Compute Cloud instance ID. */ + computeInstanceId: string | undefined; +} + +export interface ListApplicationsResponse { + $type: "yandex.cloud.backup.v1.ListApplicationsResponse"; + applications: PolicyApplication[]; +} + +export interface ExecuteRequest { + $type: "yandex.cloud.backup.v1.ExecuteRequest"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface ExecuteMetadata { + $type: "yandex.cloud.backup.v1.ExecuteMetadata"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface RevokeRequest { + $type: "yandex.cloud.backup.v1.RevokeRequest"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface RevokeMetadata { + $type: "yandex.cloud.backup.v1.RevokeMetadata"; + /** Policy ID. */ + policyId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +const baseListPoliciesRequest: object = { + $type: "yandex.cloud.backup.v1.ListPoliciesRequest", + folderId: "", + computeInstanceId: "", +}; + +export const ListPoliciesRequest = { + $type: "yandex.cloud.backup.v1.ListPoliciesRequest" as const, + + encode( + message: ListPoliciesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListPoliciesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListPoliciesRequest } as ListPoliciesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListPoliciesRequest { + const message = { ...baseListPoliciesRequest } as ListPoliciesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: ListPoliciesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListPoliciesRequest { + const message = { ...baseListPoliciesRequest } as ListPoliciesRequest; + message.folderId = object.folderId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListPoliciesRequest.$type, ListPoliciesRequest); + +const baseListPoliciesResponse: object = { + $type: "yandex.cloud.backup.v1.ListPoliciesResponse", +}; + +export const ListPoliciesResponse = { + $type: "yandex.cloud.backup.v1.ListPoliciesResponse" as const, + + encode( + message: ListPoliciesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.policies) { + Policy.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListPoliciesResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListPoliciesResponse } as ListPoliciesResponse; + message.policies = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policies.push(Policy.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListPoliciesResponse { + const message = { ...baseListPoliciesResponse } as ListPoliciesResponse; + message.policies = (object.policies ?? []).map((e: any) => + Policy.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListPoliciesResponse): unknown { + const obj: any = {}; + if (message.policies) { + obj.policies = message.policies.map((e) => + e ? Policy.toJSON(e) : undefined + ); + } else { + obj.policies = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListPoliciesResponse { + const message = { ...baseListPoliciesResponse } as ListPoliciesResponse; + message.policies = object.policies?.map((e) => Policy.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListPoliciesResponse.$type, ListPoliciesResponse); + +const baseCreatePolicyRequest: object = { + $type: "yandex.cloud.backup.v1.CreatePolicyRequest", + folderId: "", + name: "", +}; + +export const CreatePolicyRequest = { + $type: "yandex.cloud.backup.v1.CreatePolicyRequest" as const, + + encode( + message: CreatePolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.settings !== undefined) { + PolicySettings.encode( + message.settings, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreatePolicyRequest { + const reader = 
input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreatePolicyRequest } as CreatePolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.settings = PolicySettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreatePolicyRequest { + const message = { ...baseCreatePolicyRequest } as CreatePolicyRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? PolicySettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: CreatePolicyRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.settings !== undefined && + (obj.settings = message.settings + ? PolicySettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreatePolicyRequest { + const message = { ...baseCreatePolicyRequest } as CreatePolicyRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
PolicySettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreatePolicyRequest.$type, CreatePolicyRequest); + +const baseCreatePolicyMetadata: object = { + $type: "yandex.cloud.backup.v1.CreatePolicyMetadata", + policyId: "", +}; + +export const CreatePolicyMetadata = { + $type: "yandex.cloud.backup.v1.CreatePolicyMetadata" as const, + + encode( + message: CreatePolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreatePolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreatePolicyMetadata } as CreatePolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreatePolicyMetadata { + const message = { ...baseCreatePolicyMetadata } as CreatePolicyMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + return message; + }, + + toJSON(message: CreatePolicyMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreatePolicyMetadata { + const message = { ...baseCreatePolicyMetadata } as CreatePolicyMetadata; + message.policyId = object.policyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreatePolicyMetadata.$type, CreatePolicyMetadata); + +const baseGetPolicyRequest: object = { + $type: "yandex.cloud.backup.v1.GetPolicyRequest", + policyId: "", +}; + +export const GetPolicyRequest = { + $type: "yandex.cloud.backup.v1.GetPolicyRequest" as const, + + encode( + message: GetPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetPolicyRequest } as GetPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetPolicyRequest { + const message = { ...baseGetPolicyRequest } as GetPolicyRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + return message; + }, + + toJSON(message: GetPolicyRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetPolicyRequest { + const message = { ...baseGetPolicyRequest } as GetPolicyRequest; + message.policyId = object.policyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetPolicyRequest.$type, GetPolicyRequest); + +const baseUpdatePolicyRequest: object = { + $type: "yandex.cloud.backup.v1.UpdatePolicyRequest", + policyId: "", +}; + +export const UpdatePolicyRequest = { + $type: "yandex.cloud.backup.v1.UpdatePolicyRequest" as const, + + encode( + message: UpdatePolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.settings !== undefined) { + PolicySettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdatePolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdatePolicyRequest } as UpdatePolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.settings = PolicySettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdatePolicyRequest { + const message = { ...baseUpdatePolicyRequest } as UpdatePolicyRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? PolicySettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdatePolicyRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.settings !== undefined && + (obj.settings = message.settings + ? 
PolicySettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdatePolicyRequest { + const message = { ...baseUpdatePolicyRequest } as UpdatePolicyRequest; + message.policyId = object.policyId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? PolicySettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdatePolicyRequest.$type, UpdatePolicyRequest); + +const baseUpdatePolicyMetadata: object = { + $type: "yandex.cloud.backup.v1.UpdatePolicyMetadata", + policyId: "", +}; + +export const UpdatePolicyMetadata = { + $type: "yandex.cloud.backup.v1.UpdatePolicyMetadata" as const, + + encode( + message: UpdatePolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdatePolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdatePolicyMetadata } as UpdatePolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdatePolicyMetadata { + const message = { ...baseUpdatePolicyMetadata } as UpdatePolicyMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? 
String(object.policyId) + : ""; + return message; + }, + + toJSON(message: UpdatePolicyMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdatePolicyMetadata { + const message = { ...baseUpdatePolicyMetadata } as UpdatePolicyMetadata; + message.policyId = object.policyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdatePolicyMetadata.$type, UpdatePolicyMetadata); + +const baseDeletePolicyRequest: object = { + $type: "yandex.cloud.backup.v1.DeletePolicyRequest", + policyId: "", +}; + +export const DeletePolicyRequest = { + $type: "yandex.cloud.backup.v1.DeletePolicyRequest" as const, + + encode( + message: DeletePolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeletePolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeletePolicyRequest } as DeletePolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeletePolicyRequest { + const message = { ...baseDeletePolicyRequest } as DeletePolicyRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? 
String(object.policyId) + : ""; + return message; + }, + + toJSON(message: DeletePolicyRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeletePolicyRequest { + const message = { ...baseDeletePolicyRequest } as DeletePolicyRequest; + message.policyId = object.policyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeletePolicyRequest.$type, DeletePolicyRequest); + +const baseDeletePolicyMetadata: object = { + $type: "yandex.cloud.backup.v1.DeletePolicyMetadata", + policyId: "", +}; + +export const DeletePolicyMetadata = { + $type: "yandex.cloud.backup.v1.DeletePolicyMetadata" as const, + + encode( + message: DeletePolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeletePolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeletePolicyMetadata } as DeletePolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeletePolicyMetadata { + const message = { ...baseDeletePolicyMetadata } as DeletePolicyMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? 
String(object.policyId) + : ""; + return message; + }, + + toJSON(message: DeletePolicyMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeletePolicyMetadata { + const message = { ...baseDeletePolicyMetadata } as DeletePolicyMetadata; + message.policyId = object.policyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeletePolicyMetadata.$type, DeletePolicyMetadata); + +const baseApplyPolicyRequest: object = { + $type: "yandex.cloud.backup.v1.ApplyPolicyRequest", + policyId: "", + computeInstanceId: "", +}; + +export const ApplyPolicyRequest = { + $type: "yandex.cloud.backup.v1.ApplyPolicyRequest" as const, + + encode( + message: ApplyPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ApplyPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseApplyPolicyRequest } as ApplyPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ApplyPolicyRequest { + const message = { ...baseApplyPolicyRequest } as ApplyPolicyRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: ApplyPolicyRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ApplyPolicyRequest { + const message = { ...baseApplyPolicyRequest } as ApplyPolicyRequest; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ApplyPolicyRequest.$type, ApplyPolicyRequest); + +const baseApplyPolicyMetadata: object = { + $type: "yandex.cloud.backup.v1.ApplyPolicyMetadata", + policyId: "", + computeInstanceId: "", +}; + +export const ApplyPolicyMetadata = { + $type: "yandex.cloud.backup.v1.ApplyPolicyMetadata" as const, + + encode( + message: ApplyPolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ApplyPolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseApplyPolicyMetadata } as ApplyPolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ApplyPolicyMetadata { + const message = { ...baseApplyPolicyMetadata } as ApplyPolicyMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: ApplyPolicyMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ApplyPolicyMetadata { + const message = { ...baseApplyPolicyMetadata } as ApplyPolicyMetadata; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ApplyPolicyMetadata.$type, ApplyPolicyMetadata); + +const baseListApplicationsRequest: object = { + $type: "yandex.cloud.backup.v1.ListApplicationsRequest", +}; + +export const ListApplicationsRequest = { + $type: "yandex.cloud.backup.v1.ListApplicationsRequest" as const, + + encode( + message: ListApplicationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== undefined) { + writer.uint32(10).string(message.folderId); + } + if (message.policyId !== undefined) { + writer.uint32(18).string(message.policyId); + } + if (message.computeInstanceId !== undefined) { + writer.uint32(26).string(message.computeInstanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListApplicationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListApplicationsRequest, + } as ListApplicationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.policyId = reader.string(); + break; + case 3: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListApplicationsRequest { + const message = { + ...baseListApplicationsRequest, + } as ListApplicationsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : undefined; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : undefined; + return message; + }, + + toJSON(message: ListApplicationsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListApplicationsRequest { + const message = { + ...baseListApplicationsRequest, + } as ListApplicationsRequest; + message.folderId = object.folderId ?? undefined; + message.policyId = object.policyId ?? undefined; + message.computeInstanceId = object.computeInstanceId ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(ListApplicationsRequest.$type, ListApplicationsRequest); + +const baseListApplicationsResponse: object = { + $type: "yandex.cloud.backup.v1.ListApplicationsResponse", +}; + +export const ListApplicationsResponse = { + $type: "yandex.cloud.backup.v1.ListApplicationsResponse" as const, + + encode( + message: ListApplicationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.applications) { + PolicyApplication.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListApplicationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListApplicationsResponse, + } as ListApplicationsResponse; + message.applications = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.applications.push( + PolicyApplication.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListApplicationsResponse { + const message = { + ...baseListApplicationsResponse, + } as ListApplicationsResponse; + message.applications = (object.applications ?? []).map((e: any) => + PolicyApplication.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListApplicationsResponse): unknown { + const obj: any = {}; + if (message.applications) { + obj.applications = message.applications.map((e) => + e ? PolicyApplication.toJSON(e) : undefined + ); + } else { + obj.applications = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListApplicationsResponse { + const message = { + ...baseListApplicationsResponse, + } as ListApplicationsResponse; + message.applications = + object.applications?.map((e) => PolicyApplication.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ListApplicationsResponse.$type, + ListApplicationsResponse +); + +const baseExecuteRequest: object = { + $type: "yandex.cloud.backup.v1.ExecuteRequest", + policyId: "", + computeInstanceId: "", +}; + +export const ExecuteRequest = { + $type: "yandex.cloud.backup.v1.ExecuteRequest" as const, + + encode( + message: ExecuteRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExecuteRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExecuteRequest } as ExecuteRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExecuteRequest { + const message = { ...baseExecuteRequest } as ExecuteRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: ExecuteRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExecuteRequest { + const message = { ...baseExecuteRequest } as ExecuteRequest; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ExecuteRequest.$type, ExecuteRequest); + +const baseExecuteMetadata: object = { + $type: "yandex.cloud.backup.v1.ExecuteMetadata", + policyId: "", + computeInstanceId: "", +}; + +export const ExecuteMetadata = { + $type: "yandex.cloud.backup.v1.ExecuteMetadata" as const, + + encode( + message: ExecuteMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExecuteMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExecuteMetadata } as ExecuteMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExecuteMetadata { + const message = { ...baseExecuteMetadata } as ExecuteMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: ExecuteMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExecuteMetadata { + const message = { ...baseExecuteMetadata } as ExecuteMetadata; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ExecuteMetadata.$type, ExecuteMetadata); + +const baseRevokeRequest: object = { + $type: "yandex.cloud.backup.v1.RevokeRequest", + policyId: "", + computeInstanceId: "", +}; + +export const RevokeRequest = { + $type: "yandex.cloud.backup.v1.RevokeRequest" as const, + + encode( + message: RevokeRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RevokeRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRevokeRequest } as RevokeRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RevokeRequest { + const message = { ...baseRevokeRequest } as RevokeRequest; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? 
String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: RevokeRequest): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RevokeRequest { + const message = { ...baseRevokeRequest } as RevokeRequest; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RevokeRequest.$type, RevokeRequest); + +const baseRevokeMetadata: object = { + $type: "yandex.cloud.backup.v1.RevokeMetadata", + policyId: "", + computeInstanceId: "", +}; + +export const RevokeMetadata = { + $type: "yandex.cloud.backup.v1.RevokeMetadata" as const, + + encode( + message: RevokeMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.policyId !== "") { + writer.uint32(10).string(message.policyId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RevokeMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRevokeMetadata } as RevokeMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.policyId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RevokeMetadata { + const message = { ...baseRevokeMetadata } as RevokeMetadata; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: RevokeMetadata): unknown { + const obj: any = {}; + message.policyId !== undefined && (obj.policyId = message.policyId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RevokeMetadata { + const message = { ...baseRevokeMetadata } as RevokeMetadata; + message.policyId = object.policyId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RevokeMetadata.$type, RevokeMetadata); + +/** A set of methods for managing [policies](/docs/backup/concepts/policy). */ +export const PolicyServiceService = { + /** List [policies](/docs/backup/concepts/policy) of specified folder. 
*/ + list: { + path: "/yandex.cloud.backup.v1.PolicyService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListPoliciesRequest) => + Buffer.from(ListPoliciesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListPoliciesRequest.decode(value), + responseSerialize: (value: ListPoliciesResponse) => + Buffer.from(ListPoliciesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListPoliciesResponse.decode(value), + }, + /** + * Create a new policy. + * + * For detailed information, please see [Creating a backup policy](/docs/backup/operations/policy-vm/create). + */ + create: { + path: "/yandex.cloud.backup.v1.PolicyService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreatePolicyRequest) => + Buffer.from(CreatePolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreatePolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Get specific policy. */ + get: { + path: "/yandex.cloud.backup.v1.PolicyService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetPolicyRequest) => + Buffer.from(GetPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetPolicyRequest.decode(value), + responseSerialize: (value: Policy) => + Buffer.from(Policy.encode(value).finish()), + responseDeserialize: (value: Buffer) => Policy.decode(value), + }, + /** Update specific policy. 
*/ + update: { + path: "/yandex.cloud.backup.v1.PolicyService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdatePolicyRequest) => + Buffer.from(UpdatePolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdatePolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Delete specific policy. */ + delete: { + path: "/yandex.cloud.backup.v1.PolicyService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeletePolicyRequest) => + Buffer.from(DeletePolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeletePolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Apply policy to [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). */ + apply: { + path: "/yandex.cloud.backup.v1.PolicyService/Apply", + requestStream: false, + responseStream: false, + requestSerialize: (value: ApplyPolicyRequest) => + Buffer.from(ApplyPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ApplyPolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** List applied policies using filters. 
*/ + listApplications: { + path: "/yandex.cloud.backup.v1.PolicyService/ListApplications", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListApplicationsRequest) => + Buffer.from(ListApplicationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListApplicationsRequest.decode(value), + responseSerialize: (value: ListApplicationsResponse) => + Buffer.from(ListApplicationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListApplicationsResponse.decode(value), + }, + /** + * Run policy on specific Compute Cloud instance. That will create backup + * according selected policy. In order to perform this action, policy should be + * applied to the Compute Cloud instance. + */ + execute: { + path: "/yandex.cloud.backup.v1.PolicyService/Execute", + requestStream: false, + responseStream: false, + requestSerialize: (value: ExecuteRequest) => + Buffer.from(ExecuteRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ExecuteRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Revoke policy from Compute Cloud instance. */ + revoke: { + path: "/yandex.cloud.backup.v1.PolicyService/Revoke", + requestStream: false, + responseStream: false, + requestSerialize: (value: RevokeRequest) => + Buffer.from(RevokeRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RevokeRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface PolicyServiceServer extends UntypedServiceImplementation { + /** List [policies](/docs/backup/concepts/policy) of specified folder. */ + list: handleUnaryCall; + /** + * Create a new policy. 
+ * + * For detailed information, please see [Creating a backup policy](/docs/backup/operations/policy-vm/create). + */ + create: handleUnaryCall; + /** Get specific policy. */ + get: handleUnaryCall; + /** Update specific policy. */ + update: handleUnaryCall; + /** Delete specific policy. */ + delete: handleUnaryCall; + /** Apply policy to [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). */ + apply: handleUnaryCall; + /** List applied policies using filters. */ + listApplications: handleUnaryCall< + ListApplicationsRequest, + ListApplicationsResponse + >; + /** + * Run policy on specific Compute Cloud instance. That will create backup + * according selected policy. In order to perform this action, policy should be + * applied to the Compute Cloud instance. + */ + execute: handleUnaryCall; + /** Revoke policy from Compute Cloud instance. */ + revoke: handleUnaryCall; +} + +export interface PolicyServiceClient extends Client { + /** List [policies](/docs/backup/concepts/policy) of specified folder. */ + list( + request: ListPoliciesRequest, + callback: ( + error: ServiceError | null, + response: ListPoliciesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListPoliciesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListPoliciesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListPoliciesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListPoliciesResponse + ) => void + ): ClientUnaryCall; + /** + * Create a new policy. + * + * For detailed information, please see [Creating a backup policy](/docs/backup/operations/policy-vm/create). 
+ */ + create( + request: CreatePolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreatePolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreatePolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Get specific policy. */ + get( + request: GetPolicyRequest, + callback: (error: ServiceError | null, response: Policy) => void + ): ClientUnaryCall; + get( + request: GetPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Policy) => void + ): ClientUnaryCall; + get( + request: GetPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Policy) => void + ): ClientUnaryCall; + /** Update specific policy. */ + update( + request: UpdatePolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdatePolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdatePolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Delete specific policy. 
*/ + delete( + request: DeletePolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeletePolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeletePolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Apply policy to [Compute Cloud instance](/docs/backup/concepts/vm-connection#os). */ + apply( + request: ApplyPolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + apply( + request: ApplyPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + apply( + request: ApplyPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** List applied policies using filters. */ + listApplications( + request: ListApplicationsRequest, + callback: ( + error: ServiceError | null, + response: ListApplicationsResponse + ) => void + ): ClientUnaryCall; + listApplications( + request: ListApplicationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListApplicationsResponse + ) => void + ): ClientUnaryCall; + listApplications( + request: ListApplicationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListApplicationsResponse + ) => void + ): ClientUnaryCall; + /** + * Run policy on specific Compute Cloud instance. That will create backup + * according selected policy. In order to perform this action, policy should be + * applied to the Compute Cloud instance. 
+ */ + execute( + request: ExecuteRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + execute( + request: ExecuteRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + execute( + request: ExecuteRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Revoke policy from Compute Cloud instance. */ + revoke( + request: RevokeRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + revoke( + request: RevokeRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + revoke( + request: RevokeRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const PolicyServiceClient = makeGenericClientConstructor( + PolicyServiceService, + "yandex.cloud.backup.v1.PolicyService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): PolicyServiceClient; + service: typeof PolicyServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/provider_service.ts b/src/generated/yandex/cloud/backup/v1/provider_service.ts new file mode 100644 index 00000000..ee1e952e --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/provider_service.ts @@ -0,0 +1,501 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +export interface ActivateProviderRequest { + $type: "yandex.cloud.backup.v1.ActivateProviderRequest"; + /** Activate provider for Folder iD. */ + folderId: string; + /** + * Activate specific provider by name. + * + * For more information, please see [activate-provider](/docs/backup/quickstart#activate-provider) + */ + name: string; +} + +export interface ActivateProviderMetadata { + $type: "yandex.cloud.backup.v1.ActivateProviderMetadata"; + /** Activate provider for folder specified by ID. */ + folderId: string; +} + +export interface ListActivatedProvidersRequest { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersRequest"; + /** ID of the folder to find out the backup provider. */ + folderId: string; +} + +export interface ListActivatedProvidersResponse { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersResponse"; + /** Folder ID. */ + folderId: string; + /** Name of the backup provider. 
*/ + names: string[]; +} + +const baseActivateProviderRequest: object = { + $type: "yandex.cloud.backup.v1.ActivateProviderRequest", + folderId: "", + name: "", +}; + +export const ActivateProviderRequest = { + $type: "yandex.cloud.backup.v1.ActivateProviderRequest" as const, + + encode( + message: ActivateProviderRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateProviderRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateProviderRequest, + } as ActivateProviderRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateProviderRequest { + const message = { + ...baseActivateProviderRequest, + } as ActivateProviderRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: ActivateProviderRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateProviderRequest { + const message = { + ...baseActivateProviderRequest, + } as ActivateProviderRequest; + message.folderId = object.folderId ?? 
""; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ActivateProviderRequest.$type, ActivateProviderRequest); + +const baseActivateProviderMetadata: object = { + $type: "yandex.cloud.backup.v1.ActivateProviderMetadata", + folderId: "", +}; + +export const ActivateProviderMetadata = { + $type: "yandex.cloud.backup.v1.ActivateProviderMetadata" as const, + + encode( + message: ActivateProviderMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateProviderMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateProviderMetadata, + } as ActivateProviderMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateProviderMetadata { + const message = { + ...baseActivateProviderMetadata, + } as ActivateProviderMetadata; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + return message; + }, + + toJSON(message: ActivateProviderMetadata): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateProviderMetadata { + const message = { + ...baseActivateProviderMetadata, + } as ActivateProviderMetadata; + message.folderId = object.folderId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ActivateProviderMetadata.$type, + ActivateProviderMetadata +); + +const baseListActivatedProvidersRequest: object = { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersRequest", + folderId: "", +}; + +export const ListActivatedProvidersRequest = { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersRequest" as const, + + encode( + message: ListActivatedProvidersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListActivatedProvidersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListActivatedProvidersRequest, + } as ListActivatedProvidersRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListActivatedProvidersRequest { + const message = { + ...baseListActivatedProvidersRequest, + } as ListActivatedProvidersRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + return message; + }, + + toJSON(message: ListActivatedProvidersRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListActivatedProvidersRequest { + const message = { + ...baseListActivatedProvidersRequest, + } as ListActivatedProvidersRequest; + message.folderId = object.folderId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListActivatedProvidersRequest.$type, + ListActivatedProvidersRequest +); + +const baseListActivatedProvidersResponse: object = { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersResponse", + folderId: "", + names: "", +}; + +export const ListActivatedProvidersResponse = { + $type: "yandex.cloud.backup.v1.ListActivatedProvidersResponse" as const, + + encode( + message: ListActivatedProvidersResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + for (const v of message.names) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListActivatedProvidersResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListActivatedProvidersResponse, + } as ListActivatedProvidersResponse; + message.names = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 3: + message.names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListActivatedProvidersResponse { + const message = { + ...baseListActivatedProvidersResponse, + } as ListActivatedProvidersResponse; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.names = (object.names ?? 
[]).map((e: any) => String(e)); + return message; + }, + + toJSON(message: ListActivatedProvidersResponse): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + if (message.names) { + obj.names = message.names.map((e) => e); + } else { + obj.names = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListActivatedProvidersResponse { + const message = { + ...baseListActivatedProvidersResponse, + } as ListActivatedProvidersResponse; + message.folderId = object.folderId ?? ""; + message.names = object.names?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ListActivatedProvidersResponse.$type, + ListActivatedProvidersResponse +); + +/** A set of methods for managing [backup providers](/docs/backup/concepts/#providers). */ +export const ProviderServiceService = { + /** Activate provider for specified client. */ + activate: { + path: "/yandex.cloud.backup.v1.ProviderService/Activate", + requestStream: false, + responseStream: false, + requestSerialize: (value: ActivateProviderRequest) => + Buffer.from(ActivateProviderRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ActivateProviderRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** List activated providers for specified client. 
*/ + listActivated: { + path: "/yandex.cloud.backup.v1.ProviderService/ListActivated", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListActivatedProvidersRequest) => + Buffer.from(ListActivatedProvidersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListActivatedProvidersRequest.decode(value), + responseSerialize: (value: ListActivatedProvidersResponse) => + Buffer.from(ListActivatedProvidersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListActivatedProvidersResponse.decode(value), + }, +} as const; + +export interface ProviderServiceServer extends UntypedServiceImplementation { + /** Activate provider for specified client. */ + activate: handleUnaryCall; + /** List activated providers for specified client. */ + listActivated: handleUnaryCall< + ListActivatedProvidersRequest, + ListActivatedProvidersResponse + >; +} + +export interface ProviderServiceClient extends Client { + /** Activate provider for specified client. */ + activate( + request: ActivateProviderRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateProviderRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateProviderRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** List activated providers for specified client. 
*/ + listActivated( + request: ListActivatedProvidersRequest, + callback: ( + error: ServiceError | null, + response: ListActivatedProvidersResponse + ) => void + ): ClientUnaryCall; + listActivated( + request: ListActivatedProvidersRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListActivatedProvidersResponse + ) => void + ): ClientUnaryCall; + listActivated( + request: ListActivatedProvidersRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListActivatedProvidersResponse + ) => void + ): ClientUnaryCall; +} + +export const ProviderServiceClient = makeGenericClientConstructor( + ProviderServiceService, + "yandex.cloud.backup.v1.ProviderService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ProviderServiceClient; + service: typeof ProviderServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/resource.ts b/src/generated/yandex/cloud/backup/v1/resource.ts new file mode 100644 index 00000000..719f0ea0 --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/resource.ts @@ -0,0 +1,827 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +export interface Resource { + $type: "yandex.cloud.backup.v1.Resource"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + createdAt?: Date; + updatedAt?: Date; + /** If this field is true, it means that instance is online. */ + online: boolean; + /** If this field is true, it means that backup is enabled to instance. */ + enabled: boolean; + status: Resource_Status; + /** + * If status value is one of `OTHER` or `FAILED`, + * detailed info might be stored here. + */ + statusDetails: string; + /** + * In case status is one of `BACKUPING` or `RECOVERING`, + * progress value might be found here. + */ + statusProgress: number; + lastBackupTime?: Date; + nextBackupTime?: Date; + /** Resource ID is used to identify Compute Cloud instance in backup service. */ + resourceId: string; + /** + * Status `is_active` shows whether current Compute Cloud instance controls Cloud Backup resource. + * If status `is_active` is false it means Compute Cloud instance is not able to manipulate + * Cloud Backup resource. + */ + isActive: boolean; +} + +export enum Resource_Status { + STATUS_UNSPECIFIED = 0, + /** IDLE - Compute Cloud instance is doing nothing right now. */ + IDLE = 1, + /** BACKUPING - Compute Cloud instance is currently backing up itself. 
*/ + BACKUPING = 2, + /** RECOVERING - Compute Cloud instance is currently recovering itself. */ + RECOVERING = 3, + /** + * FAILED - Compute Cloud instance is in failure state, check content of + * `status_details` field for more information. + */ + FAILED = 4, + /** + * OTHER - Unspecified state, check `status_details` field + * for more information. + */ + OTHER = 5, + UNRECOGNIZED = -1, +} + +export function resource_StatusFromJSON(object: any): Resource_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return Resource_Status.STATUS_UNSPECIFIED; + case 1: + case "IDLE": + return Resource_Status.IDLE; + case 2: + case "BACKUPING": + return Resource_Status.BACKUPING; + case 3: + case "RECOVERING": + return Resource_Status.RECOVERING; + case 4: + case "FAILED": + return Resource_Status.FAILED; + case 5: + case "OTHER": + return Resource_Status.OTHER; + case -1: + case "UNRECOGNIZED": + default: + return Resource_Status.UNRECOGNIZED; + } +} + +export function resource_StatusToJSON(object: Resource_Status): string { + switch (object) { + case Resource_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case Resource_Status.IDLE: + return "IDLE"; + case Resource_Status.BACKUPING: + return "BACKUPING"; + case Resource_Status.RECOVERING: + return "RECOVERING"; + case Resource_Status.FAILED: + return "FAILED"; + case Resource_Status.OTHER: + return "OTHER"; + default: + return "UNKNOWN"; + } +} + +export interface Progress { + $type: "yandex.cloud.backup.v1.Progress"; + current: number; + total: number; +} + +export interface Task { + $type: "yandex.cloud.backup.v1.Task"; + /** Task ID. */ + id: number; + /** + * Shows whether the task is cancellable. + * Note: task cancellation is not supported yet. + */ + cancellable: boolean; + /** Policy ID. */ + policyId: string; + /** Type of the task. */ + type: Task_Type; + /** Task progress. */ + progress?: Progress; + /** Task status. 
*/ + status: Task_Status; + enqueuedAt?: Date; + startedAt?: Date; + updatedAt?: Date; + completedAt?: Date; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export enum Task_Type { + TYPE_UNSPECIFIED = 0, + BACKUP = 1, + RETENTION = 2, + RECOVERY = 3, + UNRECOGNIZED = -1, +} + +export function task_TypeFromJSON(object: any): Task_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return Task_Type.TYPE_UNSPECIFIED; + case 1: + case "BACKUP": + return Task_Type.BACKUP; + case 2: + case "RETENTION": + return Task_Type.RETENTION; + case 3: + case "RECOVERY": + return Task_Type.RECOVERY; + case -1: + case "UNRECOGNIZED": + default: + return Task_Type.UNRECOGNIZED; + } +} + +export function task_TypeToJSON(object: Task_Type): string { + switch (object) { + case Task_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case Task_Type.BACKUP: + return "BACKUP"; + case Task_Type.RETENTION: + return "RETENTION"; + case Task_Type.RECOVERY: + return "RECOVERY"; + default: + return "UNKNOWN"; + } +} + +/** Status of task. 
*/ +export enum Task_Status { + STATUS_UNSPECIFIED = 0, + ENQUEUED = 1, + ASSIGNED = 2, + STARTED = 3, + PAUSED = 4, + COMPLETED = 5, + UNRECOGNIZED = -1, +} + +export function task_StatusFromJSON(object: any): Task_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return Task_Status.STATUS_UNSPECIFIED; + case 1: + case "ENQUEUED": + return Task_Status.ENQUEUED; + case 2: + case "ASSIGNED": + return Task_Status.ASSIGNED; + case 3: + case "STARTED": + return Task_Status.STARTED; + case 4: + case "PAUSED": + return Task_Status.PAUSED; + case 5: + case "COMPLETED": + return Task_Status.COMPLETED; + case -1: + case "UNRECOGNIZED": + default: + return Task_Status.UNRECOGNIZED; + } +} + +export function task_StatusToJSON(object: Task_Status): string { + switch (object) { + case Task_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case Task_Status.ENQUEUED: + return "ENQUEUED"; + case Task_Status.ASSIGNED: + return "ASSIGNED"; + case Task_Status.STARTED: + return "STARTED"; + case Task_Status.PAUSED: + return "PAUSED"; + case Task_Status.COMPLETED: + return "COMPLETED"; + default: + return "UNKNOWN"; + } +} + +const baseResource: object = { + $type: "yandex.cloud.backup.v1.Resource", + computeInstanceId: "", + online: false, + enabled: false, + status: 0, + statusDetails: "", + statusProgress: 0, + resourceId: "", + isActive: false, +}; + +export const Resource = { + $type: "yandex.cloud.backup.v1.Resource" as const, + + encode( + message: Resource, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.online === true) { + 
writer.uint32(32).bool(message.online); + } + if (message.enabled === true) { + writer.uint32(40).bool(message.enabled); + } + if (message.status !== 0) { + writer.uint32(48).int32(message.status); + } + if (message.statusDetails !== "") { + writer.uint32(58).string(message.statusDetails); + } + if (message.statusProgress !== 0) { + writer.uint32(64).int64(message.statusProgress); + } + if (message.lastBackupTime !== undefined) { + Timestamp.encode( + toTimestamp(message.lastBackupTime), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.nextBackupTime !== undefined) { + Timestamp.encode( + toTimestamp(message.nextBackupTime), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.resourceId !== "") { + writer.uint32(90).string(message.resourceId); + } + if (message.isActive === true) { + writer.uint32(96).bool(message.isActive); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Resource { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseResource } as Resource; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.online = reader.bool(); + break; + case 5: + message.enabled = reader.bool(); + break; + case 6: + message.status = reader.int32() as any; + break; + case 7: + message.statusDetails = reader.string(); + break; + case 8: + message.statusProgress = longToNumber(reader.int64() as Long); + break; + case 9: + message.lastBackupTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.nextBackupTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + message.resourceId = reader.string(); + break; + case 12: + message.isActive = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Resource { + const message = { ...baseResource } as Resource; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.online = + object.online !== undefined && object.online !== null + ? Boolean(object.online) + : false; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? 
Boolean(object.enabled) + : false; + message.status = + object.status !== undefined && object.status !== null + ? resource_StatusFromJSON(object.status) + : 0; + message.statusDetails = + object.statusDetails !== undefined && object.statusDetails !== null + ? String(object.statusDetails) + : ""; + message.statusProgress = + object.statusProgress !== undefined && object.statusProgress !== null + ? Number(object.statusProgress) + : 0; + message.lastBackupTime = + object.lastBackupTime !== undefined && object.lastBackupTime !== null + ? fromJsonTimestamp(object.lastBackupTime) + : undefined; + message.nextBackupTime = + object.nextBackupTime !== undefined && object.nextBackupTime !== null + ? fromJsonTimestamp(object.nextBackupTime) + : undefined; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.isActive = + object.isActive !== undefined && object.isActive !== null + ? Boolean(object.isActive) + : false; + return message; + }, + + toJSON(message: Resource): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.online !== undefined && (obj.online = message.online); + message.enabled !== undefined && (obj.enabled = message.enabled); + message.status !== undefined && + (obj.status = resource_StatusToJSON(message.status)); + message.statusDetails !== undefined && + (obj.statusDetails = message.statusDetails); + message.statusProgress !== undefined && + (obj.statusProgress = Math.round(message.statusProgress)); + message.lastBackupTime !== undefined && + (obj.lastBackupTime = message.lastBackupTime.toISOString()); + message.nextBackupTime !== undefined && + (obj.nextBackupTime = message.nextBackupTime.toISOString()); + 
message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.isActive !== undefined && (obj.isActive = message.isActive); + return obj; + }, + + fromPartial, I>>(object: I): Resource { + const message = { ...baseResource } as Resource; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.online = object.online ?? false; + message.enabled = object.enabled ?? false; + message.status = object.status ?? 0; + message.statusDetails = object.statusDetails ?? ""; + message.statusProgress = object.statusProgress ?? 0; + message.lastBackupTime = object.lastBackupTime ?? undefined; + message.nextBackupTime = object.nextBackupTime ?? undefined; + message.resourceId = object.resourceId ?? ""; + message.isActive = object.isActive ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Resource.$type, Resource); + +const baseProgress: object = { + $type: "yandex.cloud.backup.v1.Progress", + current: 0, + total: 0, +}; + +export const Progress = { + $type: "yandex.cloud.backup.v1.Progress" as const, + + encode( + message: Progress, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.current !== 0) { + writer.uint32(8).int64(message.current); + } + if (message.total !== 0) { + writer.uint32(16).int64(message.total); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Progress { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseProgress } as Progress; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.current = longToNumber(reader.int64() as Long); + break; + case 2: + message.total = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Progress { + const message = { ...baseProgress } as Progress; + message.current = + object.current !== undefined && object.current !== null + ? Number(object.current) + : 0; + message.total = + object.total !== undefined && object.total !== null + ? Number(object.total) + : 0; + return message; + }, + + toJSON(message: Progress): unknown { + const obj: any = {}; + message.current !== undefined && + (obj.current = Math.round(message.current)); + message.total !== undefined && (obj.total = Math.round(message.total)); + return obj; + }, + + fromPartial, I>>(object: I): Progress { + const message = { ...baseProgress } as Progress; + message.current = object.current ?? 0; + message.total = object.total ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Progress.$type, Progress); + +const baseTask: object = { + $type: "yandex.cloud.backup.v1.Task", + id: 0, + cancellable: false, + policyId: "", + type: 0, + status: 0, + computeInstanceId: "", +}; + +export const Task = { + $type: "yandex.cloud.backup.v1.Task" as const, + + encode(message: Task, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== 0) { + writer.uint32(8).int64(message.id); + } + if (message.cancellable === true) { + writer.uint32(16).bool(message.cancellable); + } + if (message.policyId !== "") { + writer.uint32(26).string(message.policyId); + } + if (message.type !== 0) { + writer.uint32(32).int32(message.type); + } + if (message.progress !== undefined) { + Progress.encode(message.progress, writer.uint32(42).fork()).ldelim(); + } + if (message.status !== 0) { + writer.uint32(48).int32(message.status); + } + if (message.enqueuedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.enqueuedAt), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(66).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.completedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.completedAt), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.computeInstanceId !== "") { + writer.uint32(90).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Task { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseTask } as Task; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = longToNumber(reader.int64() as Long); + break; + case 2: + message.cancellable = reader.bool(); + break; + case 3: + message.policyId = reader.string(); + break; + case 4: + message.type = reader.int32() as any; + break; + case 5: + message.progress = Progress.decode(reader, reader.uint32()); + break; + case 6: + message.status = reader.int32() as any; + break; + case 7: + message.enqueuedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 9: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.completedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Task { + const message = { ...baseTask } as Task; + message.id = + object.id !== undefined && object.id !== null ? Number(object.id) : 0; + message.cancellable = + object.cancellable !== undefined && object.cancellable !== null + ? Boolean(object.cancellable) + : false; + message.policyId = + object.policyId !== undefined && object.policyId !== null + ? String(object.policyId) + : ""; + message.type = + object.type !== undefined && object.type !== null + ? task_TypeFromJSON(object.type) + : 0; + message.progress = + object.progress !== undefined && object.progress !== null + ? Progress.fromJSON(object.progress) + : undefined; + message.status = + object.status !== undefined && object.status !== null + ? task_StatusFromJSON(object.status) + : 0; + message.enqueuedAt = + object.enqueuedAt !== undefined && object.enqueuedAt !== null + ? 
fromJsonTimestamp(object.enqueuedAt) + : undefined; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.completedAt = + object.completedAt !== undefined && object.completedAt !== null + ? fromJsonTimestamp(object.completedAt) + : undefined; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: Task): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = Math.round(message.id)); + message.cancellable !== undefined && + (obj.cancellable = message.cancellable); + message.policyId !== undefined && (obj.policyId = message.policyId); + message.type !== undefined && (obj.type = task_TypeToJSON(message.type)); + message.progress !== undefined && + (obj.progress = message.progress + ? Progress.toJSON(message.progress) + : undefined); + message.status !== undefined && + (obj.status = task_StatusToJSON(message.status)); + message.enqueuedAt !== undefined && + (obj.enqueuedAt = message.enqueuedAt.toISOString()); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.completedAt !== undefined && + (obj.completedAt = message.completedAt.toISOString()); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>(object: I): Task { + const message = { ...baseTask } as Task; + message.id = object.id ?? 0; + message.cancellable = object.cancellable ?? false; + message.policyId = object.policyId ?? ""; + message.type = object.type ?? 
0; + message.progress = + object.progress !== undefined && object.progress !== null + ? Progress.fromPartial(object.progress) + : undefined; + message.status = object.status ?? 0; + message.enqueuedAt = object.enqueuedAt ?? undefined; + message.startedAt = object.startedAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.completedAt = object.completedAt ?? undefined; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Task.$type, Task); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/backup/v1/resource_service.ts b/src/generated/yandex/cloud/backup/v1/resource_service.ts new file mode 100644 index 00000000..649cc643 --- /dev/null +++ b/src/generated/yandex/cloud/backup/v1/resource_service.ts @@ -0,0 +1,1563 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Resource, Task } from "../../../../yandex/cloud/backup/v1/resource"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.backup.v1"; + +export interface ListResourcesRequest { + $type: "yandex.cloud.backup.v1.ListResourcesRequest"; + /** Folder ID. */ + folderId: string; + /** Number of results per page. 
*/ + pageSize: number; + /** Token for the results page. */ + pageToken: string; +} + +export interface ListResourcesResponse { + $type: "yandex.cloud.backup.v1.ListResourcesResponse"; + /** Set of resource parameters. */ + resources: Resource[]; + /** Token for the next results page. */ + nextPageToken: string; +} + +export interface GetResourceRequest { + $type: "yandex.cloud.backup.v1.GetResourceRequest"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface GetResourceResponse { + $type: "yandex.cloud.backup.v1.GetResourceResponse"; + /** Set of resource parameters. */ + resource?: Resource; +} + +export interface DeleteResourceRequest { + $type: "yandex.cloud.backup.v1.DeleteResourceRequest"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Resource ID is used to identify Compute Cloud instance in backup service. */ + resourceId: string; +} + +export interface DeleteResourceMetadata { + $type: "yandex.cloud.backup.v1.DeleteResourceMetadata"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; +} + +export interface ListTasksRequest { + $type: "yandex.cloud.backup.v1.ListTasksRequest"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Number of results per page. */ + pageSize: number; + /** Token for the results page. */ + pageToken: string; +} + +export interface ListTasksResponse { + $type: "yandex.cloud.backup.v1.ListTasksResponse"; + /** Set of tasks parameters. */ + tasks: Task[]; + /** Token for the next results page. */ + nextPageToken: string; +} + +export interface ListDirectoryRequest { + $type: "yandex.cloud.backup.v1.ListDirectoryRequest"; + /** Folder ID. */ + folderId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Path to list items in. 
*/ + path: string; +} + +export interface ListDirectoryResponse { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse"; + items: ListDirectoryResponse_FilesystemItem[]; +} + +export interface ListDirectoryResponse_FilesystemItem { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse.FilesystemItem"; + /** Item name. */ + name: string; + /** Might be Volume, Directory or File. */ + type: ListDirectoryResponse_FilesystemItem_Type; + /** Might be Directory or File. */ + fileType: ListDirectoryResponse_FilesystemItem_Type; + size: number; +} + +export enum ListDirectoryResponse_FilesystemItem_Type { + TYPE_UNSPECIFIED = 0, + VOLUME = 1, + DIRECTORY = 2, + FILE = 3, + UNRECOGNIZED = -1, +} + +export function listDirectoryResponse_FilesystemItem_TypeFromJSON( + object: any +): ListDirectoryResponse_FilesystemItem_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return ListDirectoryResponse_FilesystemItem_Type.TYPE_UNSPECIFIED; + case 1: + case "VOLUME": + return ListDirectoryResponse_FilesystemItem_Type.VOLUME; + case 2: + case "DIRECTORY": + return ListDirectoryResponse_FilesystemItem_Type.DIRECTORY; + case 3: + case "FILE": + return ListDirectoryResponse_FilesystemItem_Type.FILE; + case -1: + case "UNRECOGNIZED": + default: + return ListDirectoryResponse_FilesystemItem_Type.UNRECOGNIZED; + } +} + +export function listDirectoryResponse_FilesystemItem_TypeToJSON( + object: ListDirectoryResponse_FilesystemItem_Type +): string { + switch (object) { + case ListDirectoryResponse_FilesystemItem_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case ListDirectoryResponse_FilesystemItem_Type.VOLUME: + return "VOLUME"; + case ListDirectoryResponse_FilesystemItem_Type.DIRECTORY: + return "DIRECTORY"; + case ListDirectoryResponse_FilesystemItem_Type.FILE: + return "FILE"; + default: + return "UNKNOWN"; + } +} + +export interface CreateDirectoryRequest { + $type: "yandex.cloud.backup.v1.CreateDirectoryRequest"; + /** Folder ID. 
*/ + folderId: string; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Path to create directory in. */ + path: string; +} + +export interface CreateDirectoryMetadata { + $type: "yandex.cloud.backup.v1.CreateDirectoryMetadata"; + /** Compute Cloud instance ID. */ + computeInstanceId: string; + /** Path to create directory metadata in. */ + path: string; +} + +const baseListResourcesRequest: object = { + $type: "yandex.cloud.backup.v1.ListResourcesRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListResourcesRequest = { + $type: "yandex.cloud.backup.v1.ListResourcesRequest" as const, + + encode( + message: ListResourcesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListResourcesRequest } as ListResourcesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcesRequest { + const message = { ...baseListResourcesRequest } as ListResourcesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcesRequest { + const message = { ...baseListResourcesRequest } as ListResourcesRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListResourcesRequest.$type, ListResourcesRequest); + +const baseListResourcesResponse: object = { + $type: "yandex.cloud.backup.v1.ListResourcesResponse", + nextPageToken: "", +}; + +export const ListResourcesResponse = { + $type: "yandex.cloud.backup.v1.ListResourcesResponse" as const, + + encode( + message: ListResourcesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.resources) { + Resource.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListResourcesResponse } as ListResourcesResponse; + message.resources = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resources.push(Resource.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcesResponse { + const message = { ...baseListResourcesResponse } as ListResourcesResponse; + message.resources = (object.resources ?? []).map((e: any) => + Resource.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcesResponse): unknown { + const obj: any = {}; + if (message.resources) { + obj.resources = message.resources.map((e) => + e ? Resource.toJSON(e) : undefined + ); + } else { + obj.resources = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcesResponse { + const message = { ...baseListResourcesResponse } as ListResourcesResponse; + message.resources = + object.resources?.map((e) => Resource.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListResourcesResponse.$type, ListResourcesResponse); + +const baseGetResourceRequest: object = { + $type: "yandex.cloud.backup.v1.GetResourceRequest", + computeInstanceId: "", +}; + +export const GetResourceRequest = { + $type: "yandex.cloud.backup.v1.GetResourceRequest" as const, + + encode( + message: GetResourceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetResourceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetResourceRequest } as GetResourceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetResourceRequest { + const message = { ...baseGetResourceRequest } as GetResourceRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: GetResourceRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetResourceRequest { + const message = { ...baseGetResourceRequest } as GetResourceRequest; + message.computeInstanceId = object.computeInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetResourceRequest.$type, GetResourceRequest); + +const baseGetResourceResponse: object = { + $type: "yandex.cloud.backup.v1.GetResourceResponse", +}; + +export const GetResourceResponse = { + $type: "yandex.cloud.backup.v1.GetResourceResponse" as const, + + encode( + message: GetResourceResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resource !== undefined) { + Resource.encode(message.resource, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetResourceResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetResourceResponse } as GetResourceResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resource = Resource.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetResourceResponse { + const message = { ...baseGetResourceResponse } as GetResourceResponse; + message.resource = + object.resource !== undefined && object.resource !== null + ? Resource.fromJSON(object.resource) + : undefined; + return message; + }, + + toJSON(message: GetResourceResponse): unknown { + const obj: any = {}; + message.resource !== undefined && + (obj.resource = message.resource + ? Resource.toJSON(message.resource) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetResourceResponse { + const message = { ...baseGetResourceResponse } as GetResourceResponse; + message.resource = + object.resource !== undefined && object.resource !== null + ? 
Resource.fromPartial(object.resource) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GetResourceResponse.$type, GetResourceResponse); + +const baseDeleteResourceRequest: object = { + $type: "yandex.cloud.backup.v1.DeleteResourceRequest", + computeInstanceId: "", + resourceId: "", +}; + +export const DeleteResourceRequest = { + $type: "yandex.cloud.backup.v1.DeleteResourceRequest" as const, + + encode( + message: DeleteResourceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.resourceId !== "") { + writer.uint32(18).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteResourceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteResourceRequest } as DeleteResourceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteResourceRequest { + const message = { ...baseDeleteResourceRequest } as DeleteResourceRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: DeleteResourceRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteResourceRequest { + const message = { ...baseDeleteResourceRequest } as DeleteResourceRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteResourceRequest.$type, DeleteResourceRequest); + +const baseDeleteResourceMetadata: object = { + $type: "yandex.cloud.backup.v1.DeleteResourceMetadata", + computeInstanceId: "", +}; + +export const DeleteResourceMetadata = { + $type: "yandex.cloud.backup.v1.DeleteResourceMetadata" as const, + + encode( + message: DeleteResourceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteResourceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteResourceMetadata } as DeleteResourceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteResourceMetadata { + const message = { ...baseDeleteResourceMetadata } as DeleteResourceMetadata; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: DeleteResourceMetadata): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteResourceMetadata { + const message = { ...baseDeleteResourceMetadata } as DeleteResourceMetadata; + message.computeInstanceId = object.computeInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteResourceMetadata.$type, DeleteResourceMetadata); + +const baseListTasksRequest: object = { + $type: "yandex.cloud.backup.v1.ListTasksRequest", + computeInstanceId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListTasksRequest = { + $type: "yandex.cloud.backup.v1.ListTasksRequest" as const, + + encode( + message: ListTasksRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListTasksRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListTasksRequest } as ListTasksRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListTasksRequest { + const message = { ...baseListTasksRequest } as ListTasksRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListTasksRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListTasksRequest { + const message = { ...baseListTasksRequest } as ListTasksRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListTasksRequest.$type, ListTasksRequest); + +const baseListTasksResponse: object = { + $type: "yandex.cloud.backup.v1.ListTasksResponse", + nextPageToken: "", +}; + +export const ListTasksResponse = { + $type: "yandex.cloud.backup.v1.ListTasksResponse" as const, + + encode( + message: ListTasksResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.tasks) { + Task.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListTasksResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListTasksResponse } as ListTasksResponse; + message.tasks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tasks.push(Task.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListTasksResponse { + const message = { ...baseListTasksResponse } as ListTasksResponse; + message.tasks = (object.tasks ?? []).map((e: any) => Task.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListTasksResponse): unknown { + const obj: any = {}; + if (message.tasks) { + obj.tasks = message.tasks.map((e) => (e ? 
Task.toJSON(e) : undefined)); + } else { + obj.tasks = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListTasksResponse { + const message = { ...baseListTasksResponse } as ListTasksResponse; + message.tasks = object.tasks?.map((e) => Task.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListTasksResponse.$type, ListTasksResponse); + +const baseListDirectoryRequest: object = { + $type: "yandex.cloud.backup.v1.ListDirectoryRequest", + folderId: "", + computeInstanceId: "", + path: "", +}; + +export const ListDirectoryRequest = { + $type: "yandex.cloud.backup.v1.ListDirectoryRequest" as const, + + encode( + message: ListDirectoryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + if (message.path !== "") { + writer.uint32(26).string(message.path); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDirectoryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListDirectoryRequest } as ListDirectoryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + case 3: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDirectoryRequest { + const message = { ...baseListDirectoryRequest } as ListDirectoryRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.path = + object.path !== undefined && object.path !== null + ? String(object.path) + : ""; + return message; + }, + + toJSON(message: ListDirectoryRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDirectoryRequest { + const message = { ...baseListDirectoryRequest } as ListDirectoryRequest; + message.folderId = object.folderId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.path = object.path ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListDirectoryRequest.$type, ListDirectoryRequest); + +const baseListDirectoryResponse: object = { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse", +}; + +export const ListDirectoryResponse = { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse" as const, + + encode( + message: ListDirectoryResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.items) { + ListDirectoryResponse_FilesystemItem.encode( + v!, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDirectoryResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListDirectoryResponse } as ListDirectoryResponse; + message.items = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.items.push( + ListDirectoryResponse_FilesystemItem.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDirectoryResponse { + const message = { ...baseListDirectoryResponse } as ListDirectoryResponse; + message.items = (object.items ?? []).map((e: any) => + ListDirectoryResponse_FilesystemItem.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListDirectoryResponse): unknown { + const obj: any = {}; + if (message.items) { + obj.items = message.items.map((e) => + e ? 
ListDirectoryResponse_FilesystemItem.toJSON(e) : undefined + ); + } else { + obj.items = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDirectoryResponse { + const message = { ...baseListDirectoryResponse } as ListDirectoryResponse; + message.items = + object.items?.map((e) => + ListDirectoryResponse_FilesystemItem.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListDirectoryResponse.$type, ListDirectoryResponse); + +const baseListDirectoryResponse_FilesystemItem: object = { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse.FilesystemItem", + name: "", + type: 0, + fileType: 0, + size: 0, +}; + +export const ListDirectoryResponse_FilesystemItem = { + $type: "yandex.cloud.backup.v1.ListDirectoryResponse.FilesystemItem" as const, + + encode( + message: ListDirectoryResponse_FilesystemItem, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.type !== 0) { + writer.uint32(16).int32(message.type); + } + if (message.fileType !== 0) { + writer.uint32(24).int32(message.fileType); + } + if (message.size !== 0) { + writer.uint32(32).int64(message.size); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDirectoryResponse_FilesystemItem { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListDirectoryResponse_FilesystemItem, + } as ListDirectoryResponse_FilesystemItem; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32() as any; + break; + case 3: + message.fileType = reader.int32() as any; + break; + case 4: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDirectoryResponse_FilesystemItem { + const message = { + ...baseListDirectoryResponse_FilesystemItem, + } as ListDirectoryResponse_FilesystemItem; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.type = + object.type !== undefined && object.type !== null + ? listDirectoryResponse_FilesystemItem_TypeFromJSON(object.type) + : 0; + message.fileType = + object.fileType !== undefined && object.fileType !== null + ? listDirectoryResponse_FilesystemItem_TypeFromJSON(object.fileType) + : 0; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + return message; + }, + + toJSON(message: ListDirectoryResponse_FilesystemItem): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.type !== undefined && + (obj.type = listDirectoryResponse_FilesystemItem_TypeToJSON( + message.type + )); + message.fileType !== undefined && + (obj.fileType = listDirectoryResponse_FilesystemItem_TypeToJSON( + message.fileType + )); + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListDirectoryResponse_FilesystemItem { + const message = { + ...baseListDirectoryResponse_FilesystemItem, + } as ListDirectoryResponse_FilesystemItem; + message.name = object.name ?? 
""; + message.type = object.type ?? 0; + message.fileType = object.fileType ?? 0; + message.size = object.size ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + ListDirectoryResponse_FilesystemItem.$type, + ListDirectoryResponse_FilesystemItem +); + +const baseCreateDirectoryRequest: object = { + $type: "yandex.cloud.backup.v1.CreateDirectoryRequest", + folderId: "", + computeInstanceId: "", + path: "", +}; + +export const CreateDirectoryRequest = { + $type: "yandex.cloud.backup.v1.CreateDirectoryRequest" as const, + + encode( + message: CreateDirectoryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + if (message.path !== "") { + writer.uint32(26).string(message.path); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateDirectoryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateDirectoryRequest } as CreateDirectoryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + case 3: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateDirectoryRequest { + const message = { ...baseCreateDirectoryRequest } as CreateDirectoryRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : ""; + message.path = + object.path !== undefined && object.path !== null + ? String(object.path) + : ""; + return message; + }, + + toJSON(message: CreateDirectoryRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateDirectoryRequest { + const message = { ...baseCreateDirectoryRequest } as CreateDirectoryRequest; + message.folderId = object.folderId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.path = object.path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateDirectoryRequest.$type, CreateDirectoryRequest); + +const baseCreateDirectoryMetadata: object = { + $type: "yandex.cloud.backup.v1.CreateDirectoryMetadata", + computeInstanceId: "", + path: "", +}; + +export const CreateDirectoryMetadata = { + $type: "yandex.cloud.backup.v1.CreateDirectoryMetadata" as const, + + encode( + message: CreateDirectoryMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.path !== "") { + writer.uint32(18).string(message.path); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateDirectoryMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateDirectoryMetadata, + } as CreateDirectoryMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateDirectoryMetadata { + const message = { + ...baseCreateDirectoryMetadata, + } as CreateDirectoryMetadata; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.path = + object.path !== undefined && object.path !== null + ? String(object.path) + : ""; + return message; + }, + + toJSON(message: CreateDirectoryMetadata): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateDirectoryMetadata { + const message = { + ...baseCreateDirectoryMetadata, + } as CreateDirectoryMetadata; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.path = object.path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateDirectoryMetadata.$type, CreateDirectoryMetadata); + +/** A set of methods for managing backup resources: [Compute Cloud instances](/docs/backup/concepts/vm-connection#os). */ +export const ResourceServiceService = { + /** List resources: Compute Cloud instances. 
*/ + list: { + path: "/yandex.cloud.backup.v1.ResourceService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListResourcesRequest) => + Buffer.from(ListResourcesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListResourcesRequest.decode(value), + responseSerialize: (value: ListResourcesResponse) => + Buffer.from(ListResourcesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListResourcesResponse.decode(value), + }, + /** Get specific Compute Cloud instance. */ + get: { + path: "/yandex.cloud.backup.v1.ResourceService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetResourceRequest) => + Buffer.from(GetResourceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetResourceRequest.decode(value), + responseSerialize: (value: GetResourceResponse) => + Buffer.from(GetResourceResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => GetResourceResponse.decode(value), + }, + /** + * Delete specific Compute Cloud instance from Cloud Backup. It does not delete + * instance from Cloud Compute service. + */ + delete: { + path: "/yandex.cloud.backup.v1.ResourceService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteResourceRequest) => + Buffer.from(DeleteResourceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteResourceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** List tasks of resources. 
*/ + listTasks: { + path: "/yandex.cloud.backup.v1.ResourceService/ListTasks", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListTasksRequest) => + Buffer.from(ListTasksRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListTasksRequest.decode(value), + responseSerialize: (value: ListTasksResponse) => + Buffer.from(ListTasksResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListTasksResponse.decode(value), + }, + /** + * ListDirectory returns all subdirectories found in requested directory identified + * by the id. + */ + listDirectory: { + path: "/yandex.cloud.backup.v1.ResourceService/ListDirectory", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListDirectoryRequest) => + Buffer.from(ListDirectoryRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListDirectoryRequest.decode(value), + responseSerialize: (value: ListDirectoryResponse) => + Buffer.from(ListDirectoryResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListDirectoryResponse.decode(value), + }, + /** CreateDirectory creates new directory by requested path. */ + createDirectory: { + path: "/yandex.cloud.backup.v1.ResourceService/CreateDirectory", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateDirectoryRequest) => + Buffer.from(CreateDirectoryRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateDirectoryRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ResourceServiceServer extends UntypedServiceImplementation { + /** List resources: Compute Cloud instances. */ + list: handleUnaryCall; + /** Get specific Compute Cloud instance. */ + get: handleUnaryCall; + /** + * Delete specific Compute Cloud instance from Cloud Backup. 
It does not delete + * instance from Cloud Compute service. + */ + delete: handleUnaryCall; + /** List tasks of resources. */ + listTasks: handleUnaryCall; + /** + * ListDirectory returns all subdirectories found in requested directory identified + * by the id. + */ + listDirectory: handleUnaryCall; + /** CreateDirectory creates new directory by requested path. */ + createDirectory: handleUnaryCall; +} + +export interface ResourceServiceClient extends Client { + /** List resources: Compute Cloud instances. */ + list( + request: ListResourcesRequest, + callback: ( + error: ServiceError | null, + response: ListResourcesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListResourcesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListResourcesResponse + ) => void + ): ClientUnaryCall; + /** Get specific Compute Cloud instance. */ + get( + request: GetResourceRequest, + callback: ( + error: ServiceError | null, + response: GetResourceResponse + ) => void + ): ClientUnaryCall; + get( + request: GetResourceRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: GetResourceResponse + ) => void + ): ClientUnaryCall; + get( + request: GetResourceRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: GetResourceResponse + ) => void + ): ClientUnaryCall; + /** + * Delete specific Compute Cloud instance from Cloud Backup. It does not delete + * instance from Cloud Compute service. 
+ */ + delete( + request: DeleteResourceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteResourceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteResourceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** List tasks of resources. */ + listTasks( + request: ListTasksRequest, + callback: (error: ServiceError | null, response: ListTasksResponse) => void + ): ClientUnaryCall; + listTasks( + request: ListTasksRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ListTasksResponse) => void + ): ClientUnaryCall; + listTasks( + request: ListTasksRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ListTasksResponse) => void + ): ClientUnaryCall; + /** + * ListDirectory returns all subdirectories found in requested directory identified + * by the id. + */ + listDirectory( + request: ListDirectoryRequest, + callback: ( + error: ServiceError | null, + response: ListDirectoryResponse + ) => void + ): ClientUnaryCall; + listDirectory( + request: ListDirectoryRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListDirectoryResponse + ) => void + ): ClientUnaryCall; + listDirectory( + request: ListDirectoryRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListDirectoryResponse + ) => void + ): ClientUnaryCall; + /** CreateDirectory creates new directory by requested path. 
*/ + createDirectory( + request: CreateDirectoryRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + createDirectory( + request: CreateDirectoryRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + createDirectory( + request: CreateDirectoryRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ResourceServiceClient = makeGenericClientConstructor( + ResourceServiceService, + "yandex.cloud.backup.v1.ResourceService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ResourceServiceClient; + service: typeof ResourceServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/billing/v1/sku_service.ts b/src/generated/yandex/cloud/billing/v1/sku_service.ts index bc1954af..b42b142e 100644 --- a/src/generated/yandex/cloud/billing/v1/sku_service.ts +++ b/src/generated/yandex/cloud/billing/v1/sku_service.ts @@ -35,7 +35,7 @@ export interface GetSkuRequest { currency: string; /** * Optional ID of the billing account. - * If specified, contract prices for concrete billing account are included in the response. + * If specified, contract prices for a particular billing account are included in the response. * To get the billing account ID, use [BillingAccountService.List] request. */ billingAccountId: string; @@ -53,7 +53,7 @@ export interface ListSkusRequest { currency: string; /** * Optional ID of the billing account. - * If specified, contract prices for concrete billing account are included in the response. + * If specified, contract prices for a particular billing account are included in the response. * To get the billing account ID, use [BillingAccountService.List] request. */ billingAccountId: string; diff --git a/src/generated/yandex/cloud/cdn/v1/raw_logs.ts b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts index f45f2616..800587bc 100644 --- a/src/generated/yandex/cloud/cdn/v1/raw_logs.ts +++ b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts @@ -12,8 +12,10 @@ export enum RawLogsStatus { RAW_LOGS_STATUS_NOT_ACTIVATED = 1, /** RAW_LOGS_STATUS_OK - Raw logs was activated, and logs storing process works as expected. 
*/ RAW_LOGS_STATUS_OK = 2, - /** RAW_LOGS_STATUS_FAILED - Raw logs was activated, but logs CDN provider has been failed to store logs. */ + /** RAW_LOGS_STATUS_FAILED - Raw logs was activated, but CDN provider has been failed to store logs. */ RAW_LOGS_STATUS_FAILED = 3, + /** RAW_LOGS_STATUS_PENDING - Raw logs was activated, but logs storing process is expected. */ + RAW_LOGS_STATUS_PENDING = 4, UNRECOGNIZED = -1, } @@ -31,6 +33,9 @@ export function rawLogsStatusFromJSON(object: any): RawLogsStatus { case 3: case "RAW_LOGS_STATUS_FAILED": return RawLogsStatus.RAW_LOGS_STATUS_FAILED; + case 4: + case "RAW_LOGS_STATUS_PENDING": + return RawLogsStatus.RAW_LOGS_STATUS_PENDING; case -1: case "UNRECOGNIZED": default: @@ -48,6 +53,8 @@ export function rawLogsStatusToJSON(object: RawLogsStatus): string { return "RAW_LOGS_STATUS_OK"; case RawLogsStatus.RAW_LOGS_STATUS_FAILED: return "RAW_LOGS_STATUS_FAILED"; + case RawLogsStatus.RAW_LOGS_STATUS_PENDING: + return "RAW_LOGS_STATUS_PENDING"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/cdn/v1/resource.ts b/src/generated/yandex/cloud/cdn/v1/resource.ts index f826fced..fc32e1ee 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource.ts @@ -255,6 +255,14 @@ export interface Resource { originProtocol: OriginProtocol; /** SSL certificate options. */ sslCertificate?: SSLCertificate; + /** Labels of the resource. */ + labels: { [key: string]: string }; +} + +export interface Resource_LabelsEntry { + $type: "yandex.cloud.cdn.v1.Resource.LabelsEntry"; + key: string; + value: string; } /** A major set of various resource options. 
*/ @@ -725,6 +733,16 @@ export const Resource = { writer.uint32(98).fork() ).ldelim(); } + Object.entries(message.labels).forEach(([key, value]) => { + Resource_LabelsEntry.encode( + { + $type: "yandex.cloud.cdn.v1.Resource.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(106).fork() + ).ldelim(); + }); return writer; }, @@ -733,6 +751,7 @@ export const Resource = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseResource } as Resource; message.secondaryHostnames = []; + message.labels = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -779,6 +798,12 @@ export const Resource = { reader.uint32() ); break; + case 13: + const entry13 = Resource_LabelsEntry.decode(reader, reader.uint32()); + if (entry13.value !== undefined) { + message.labels[entry13.key] = entry13.value; + } + break; default: reader.skipType(tag & 7); break; @@ -834,6 +859,12 @@ export const Resource = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLCertificate.fromJSON(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); return message; }, @@ -866,6 +897,12 @@ export const Resource = { (obj.sslCertificate = message.sslCertificate ? SSLCertificate.toJSON(message.sslCertificate) : undefined); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } return obj; }, @@ -889,12 +926,96 @@ export const Resource = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLCertificate.fromPartial(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); return message; }, }; messageTypeRegistry.set(Resource.$type, Resource); +const baseResource_LabelsEntry: object = { + $type: "yandex.cloud.cdn.v1.Resource.LabelsEntry", + key: "", + value: "", +}; + +export const Resource_LabelsEntry = { + $type: "yandex.cloud.cdn.v1.Resource.LabelsEntry" as const, + + encode( + message: Resource_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Resource_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseResource_LabelsEntry } as Resource_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Resource_LabelsEntry { + const message = { ...baseResource_LabelsEntry } as Resource_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: Resource_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Resource_LabelsEntry { + const message = { ...baseResource_LabelsEntry } as Resource_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Resource_LabelsEntry.$type, Resource_LabelsEntry); + const baseResourceOptions: object = { $type: "yandex.cloud.cdn.v1.ResourceOptions", }; diff --git a/src/generated/yandex/cloud/cdn/v1/resource_service.ts b/src/generated/yandex/cloud/cdn/v1/resource_service.ts index b3d961af..19339e5c 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource_service.ts @@ -91,6 +91,8 @@ export interface CreateResourceRequest { options?: ResourceOptions; /** SSL Certificate options. */ sslCertificate?: SSLTargetCertificate; + /** Labels of the resource. */ + labels: { [key: string]: string }; } export interface CreateResourceRequest_Origin { @@ -106,6 +108,12 @@ export interface CreateResourceRequest_Origin { originSourceParams?: ResourceOriginParams | undefined; } +export interface CreateResourceRequest_LabelsEntry { + $type: "yandex.cloud.cdn.v1.CreateResourceRequest.LabelsEntry"; + key: string; + value: string; +} + /** A set of resource origin parameters. */ export interface ResourceOriginParams { $type: "yandex.cloud.cdn.v1.ResourceOriginParams"; @@ -141,6 +149,14 @@ export interface UpdateResourceRequest { active?: boolean; /** SSL Certificate options. */ sslCertificate?: SSLTargetCertificate; + /** Resource labels. At some point will be needed for granular detailing. 
*/ + labels: { [key: string]: string }; +} + +export interface UpdateResourceRequest_LabelsEntry { + $type: "yandex.cloud.cdn.v1.UpdateResourceRequest.LabelsEntry"; + key: string; + value: string; } export interface UpdateResourceMetadata { @@ -467,6 +483,16 @@ export const CreateResourceRequest = { writer.uint32(66).fork() ).ldelim(); } + Object.entries(message.labels).forEach(([key, value]) => { + CreateResourceRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.cdn.v1.CreateResourceRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(74).fork() + ).ldelim(); + }); return writer; }, @@ -477,6 +503,7 @@ export const CreateResourceRequest = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseCreateResourceRequest } as CreateResourceRequest; + message.labels = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -513,6 +540,15 @@ export const CreateResourceRequest = { reader.uint32() ); break; + case 9: + const entry9 = CreateResourceRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry9.value !== undefined) { + message.labels[entry9.key] = entry9.value; + } + break; default: reader.skipType(tag & 7); break; @@ -556,6 +592,12 @@ export const CreateResourceRequest = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLTargetCertificate.fromJSON(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); return message; }, @@ -582,6 +624,12 @@ export const CreateResourceRequest = { (obj.sslCertificate = message.sslCertificate ? 
SSLTargetCertificate.toJSON(message.sslCertificate) : undefined); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } return obj; }, @@ -610,6 +658,14 @@ export const CreateResourceRequest = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLTargetCertificate.fromPartial(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); return message; }, }; @@ -729,6 +785,91 @@ messageTypeRegistry.set( CreateResourceRequest_Origin ); +const baseCreateResourceRequest_LabelsEntry: object = { + $type: "yandex.cloud.cdn.v1.CreateResourceRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateResourceRequest_LabelsEntry = { + $type: "yandex.cloud.cdn.v1.CreateResourceRequest.LabelsEntry" as const, + + encode( + message: CreateResourceRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateResourceRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateResourceRequest_LabelsEntry, + } as CreateResourceRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateResourceRequest_LabelsEntry { + const message = { + ...baseCreateResourceRequest_LabelsEntry, + } as CreateResourceRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateResourceRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateResourceRequest_LabelsEntry { + const message = { + ...baseCreateResourceRequest_LabelsEntry, + } as CreateResourceRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateResourceRequest_LabelsEntry.$type, + CreateResourceRequest_LabelsEntry +); + const baseResourceOriginParams: object = { $type: "yandex.cloud.cdn.v1.ResourceOriginParams", source: "", @@ -924,6 +1065,16 @@ export const UpdateResourceRequest = { writer.uint32(58).fork() ).ldelim(); } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateResourceRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.cdn.v1.UpdateResourceRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(66).fork() + ).ldelim(); + }); return writer; }, @@ -934,6 +1085,7 @@ export const UpdateResourceRequest = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseUpdateResourceRequest } as UpdateResourceRequest; + message.labels = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -967,6 +1119,15 @@ export const UpdateResourceRequest = { reader.uint32() ); break; + case 8: + const entry8 = UpdateResourceRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry8.value !== undefined) { + message.labels[entry8.key] = entry8.value; + } + break; default: reader.skipType(tag & 7); break; @@ -1006,6 +1167,12 @@ export const UpdateResourceRequest = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLTargetCertificate.fromJSON(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); return message; }, @@ -1029,6 +1196,12 @@ export const UpdateResourceRequest = { (obj.sslCertificate = message.sslCertificate ? 
SSLTargetCertificate.toJSON(message.sslCertificate) : undefined); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } return obj; }, @@ -1053,12 +1226,105 @@ export const UpdateResourceRequest = { object.sslCertificate !== undefined && object.sslCertificate !== null ? SSLTargetCertificate.fromPartial(object.sslCertificate) : undefined; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); return message; }, }; messageTypeRegistry.set(UpdateResourceRequest.$type, UpdateResourceRequest); +const baseUpdateResourceRequest_LabelsEntry: object = { + $type: "yandex.cloud.cdn.v1.UpdateResourceRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateResourceRequest_LabelsEntry = { + $type: "yandex.cloud.cdn.v1.UpdateResourceRequest.LabelsEntry" as const, + + encode( + message: UpdateResourceRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateResourceRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateResourceRequest_LabelsEntry, + } as UpdateResourceRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateResourceRequest_LabelsEntry { + const message = { + ...baseUpdateResourceRequest_LabelsEntry, + } as UpdateResourceRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateResourceRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateResourceRequest_LabelsEntry { + const message = { + ...baseUpdateResourceRequest_LabelsEntry, + } as UpdateResourceRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateResourceRequest_LabelsEntry.$type, + UpdateResourceRequest_LabelsEntry +); + const baseUpdateResourceMetadata: object = { $type: "yandex.cloud.cdn.v1.UpdateResourceMetadata", resourceId: "", diff --git a/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts b/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts index 6bc93350..5004628a 100644 --- a/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts +++ b/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts @@ -88,7 +88,7 @@ export function challengeTypeToJSON(object: ChallengeType): string { } } -/** A certificate. 
For details about the concept, see [documentation](docs/certificate-manager/concepts/). */ +/** A certificate. For details about the concept, see [documentation](/docs/certificate-manager/concepts/). */ export interface Certificate { $type: "yandex.cloud.certificatemanager.v1.Certificate"; /** ID of the certificate. Generated at creation time. */ @@ -130,6 +130,8 @@ export interface Certificate { challenges: Challenge[]; /** Flag that protects deletion of the certificate */ deletionProtection: boolean; + /** Mark imported certificates without uploaded chain or with chain which not lead to root certificate */ + incompleteChain: boolean; } export enum Certificate_Status { @@ -325,6 +327,7 @@ const baseCertificate: object = { subject: "", serial: "", deletionProtection: false, + incompleteChain: false, }; export const Certificate = { @@ -410,6 +413,9 @@ export const Certificate = { if (message.deletionProtection === true) { writer.uint32(144).bool(message.deletionProtection); } + if (message.incompleteChain === true) { + writer.uint32(152).bool(message.incompleteChain); + } return writer; }, @@ -493,6 +499,9 @@ export const Certificate = { case 18: message.deletionProtection = reader.bool(); break; + case 19: + message.incompleteChain = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -572,6 +581,10 @@ export const Certificate = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.incompleteChain = + object.incompleteChain !== undefined && object.incompleteChain !== null + ? 
Boolean(object.incompleteChain) + : false; return message; }, @@ -619,6 +632,8 @@ export const Certificate = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.incompleteChain !== undefined && + (obj.incompleteChain = message.incompleteChain); return obj; }, @@ -652,6 +667,7 @@ export const Certificate = { message.challenges = object.challenges?.map((e) => Challenge.fromPartial(e)) || []; message.deletionProtection = object.deletionProtection ?? false; + message.incompleteChain = object.incompleteChain ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/index.ts b/src/generated/yandex/cloud/compute/index.ts index b474f30b..f7020946 100644 --- a/src/generated/yandex/cloud/compute/index.ts +++ b/src/generated/yandex/cloud/compute/index.ts @@ -6,6 +6,8 @@ export * as disk_type from './v1/disk_type' export * as disk_type_service from './v1/disk_type_service' export * as filesystem from './v1/filesystem' export * as filesystem_service from './v1/filesystem_service' +export * as gpu_cluster from './v1/gpu_cluster' +export * as gpu_cluster_service from './v1/gpu_cluster_service' export * as host_group from './v1/host_group' export * as host_group_service from './v1/host_group_service' export * as host_type from './v1/host_type' diff --git a/src/generated/yandex/cloud/compute/v1/disk.ts b/src/generated/yandex/cloud/compute/v1/disk.ts index 63a50352..c14dcc2f 100644 --- a/src/generated/yandex/cloud/compute/v1/disk.ts +++ b/src/generated/yandex/cloud/compute/v1/disk.ts @@ -115,6 +115,7 @@ export interface DiskPlacementPolicy { $type: "yandex.cloud.compute.v1.DiskPlacementPolicy"; /** Placement group ID. 
*/ placementGroupId: string; + placementGroupPartition: number; } const baseDisk: object = { @@ -492,6 +493,7 @@ messageTypeRegistry.set(Disk_LabelsEntry.$type, Disk_LabelsEntry); const baseDiskPlacementPolicy: object = { $type: "yandex.cloud.compute.v1.DiskPlacementPolicy", placementGroupId: "", + placementGroupPartition: 0, }; export const DiskPlacementPolicy = { @@ -504,6 +506,9 @@ export const DiskPlacementPolicy = { if (message.placementGroupId !== "") { writer.uint32(10).string(message.placementGroupId); } + if (message.placementGroupPartition !== 0) { + writer.uint32(16).int64(message.placementGroupPartition); + } return writer; }, @@ -517,6 +522,11 @@ export const DiskPlacementPolicy = { case 1: message.placementGroupId = reader.string(); break; + case 2: + message.placementGroupPartition = longToNumber( + reader.int64() as Long + ); + break; default: reader.skipType(tag & 7); break; @@ -531,6 +541,11 @@ export const DiskPlacementPolicy = { object.placementGroupId !== undefined && object.placementGroupId !== null ? String(object.placementGroupId) : ""; + message.placementGroupPartition = + object.placementGroupPartition !== undefined && + object.placementGroupPartition !== null + ? Number(object.placementGroupPartition) + : 0; return message; }, @@ -538,6 +553,10 @@ export const DiskPlacementPolicy = { const obj: any = {}; message.placementGroupId !== undefined && (obj.placementGroupId = message.placementGroupId); + message.placementGroupPartition !== undefined && + (obj.placementGroupPartition = Math.round( + message.placementGroupPartition + )); return obj; }, @@ -546,6 +565,7 @@ export const DiskPlacementPolicy = { ): DiskPlacementPolicy { const message = { ...baseDiskPlacementPolicy } as DiskPlacementPolicy; message.placementGroupId = object.placementGroupId ?? ""; + message.placementGroupPartition = object.placementGroupPartition ?? 
0; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/disk_placement_group.ts b/src/generated/yandex/cloud/compute/v1/disk_placement_group.ts index c3a25cc4..5b119b91 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_placement_group.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_placement_group.ts @@ -27,8 +27,10 @@ export interface DiskPlacementGroup { zoneId: string; /** Current status of the placement group */ status: DiskPlacementGroup_Status; - /** Distribute instances over distinct failure domains. */ + /** Distribute disks over distinct failure domains. */ spreadPlacementStrategy?: DiskSpreadPlacementStrategy | undefined; + /** Distribute disks over partitions. */ + partitionPlacementStrategy?: DiskPartitionPlacementStrategy | undefined; } export enum DiskPlacementGroup_Status { @@ -89,6 +91,11 @@ export interface DiskSpreadPlacementStrategy { $type: "yandex.cloud.compute.v1.DiskSpreadPlacementStrategy"; } +export interface DiskPartitionPlacementStrategy { + $type: "yandex.cloud.compute.v1.DiskPartitionPlacementStrategy"; + partitions: number; +} + const baseDiskPlacementGroup: object = { $type: "yandex.cloud.compute.v1.DiskPlacementGroup", id: "", @@ -146,6 +153,12 @@ export const DiskPlacementGroup = { writer.uint32(66).fork() ).ldelim(); } + if (message.partitionPlacementStrategy !== undefined) { + DiskPartitionPlacementStrategy.encode( + message.partitionPlacementStrategy, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -195,6 +208,10 @@ export const DiskPlacementGroup = { reader.uint32() ); break; + case 9: + message.partitionPlacementStrategy = + DiskPartitionPlacementStrategy.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -242,6 +259,13 @@ export const DiskPlacementGroup = { object.spreadPlacementStrategy !== null ? 
DiskSpreadPlacementStrategy.fromJSON(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? DiskPartitionPlacementStrategy.fromJSON( + object.partitionPlacementStrategy + ) + : undefined; return message; }, @@ -267,6 +291,12 @@ export const DiskPlacementGroup = { (obj.spreadPlacementStrategy = message.spreadPlacementStrategy ? DiskSpreadPlacementStrategy.toJSON(message.spreadPlacementStrategy) : undefined); + message.partitionPlacementStrategy !== undefined && + (obj.partitionPlacementStrategy = message.partitionPlacementStrategy + ? DiskPartitionPlacementStrategy.toJSON( + message.partitionPlacementStrategy + ) + : undefined); return obj; }, @@ -296,6 +326,13 @@ export const DiskPlacementGroup = { object.spreadPlacementStrategy ) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? DiskPartitionPlacementStrategy.fromPartial( + object.partitionPlacementStrategy + ) + : undefined; return message; }, }; @@ -448,6 +485,92 @@ messageTypeRegistry.set( DiskSpreadPlacementStrategy ); +const baseDiskPartitionPlacementStrategy: object = { + $type: "yandex.cloud.compute.v1.DiskPartitionPlacementStrategy", + partitions: 0, +}; + +export const DiskPartitionPlacementStrategy = { + $type: "yandex.cloud.compute.v1.DiskPartitionPlacementStrategy" as const, + + encode( + message: DiskPartitionPlacementStrategy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.partitions !== 0) { + writer.uint32(8).int64(message.partitions); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DiskPartitionPlacementStrategy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDiskPartitionPlacementStrategy, + } as DiskPartitionPlacementStrategy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.partitions = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DiskPartitionPlacementStrategy { + const message = { + ...baseDiskPartitionPlacementStrategy, + } as DiskPartitionPlacementStrategy; + message.partitions = + object.partitions !== undefined && object.partitions !== null + ? Number(object.partitions) + : 0; + return message; + }, + + toJSON(message: DiskPartitionPlacementStrategy): unknown { + const obj: any = {}; + message.partitions !== undefined && + (obj.partitions = Math.round(message.partitions)); + return obj; + }, + + fromPartial, I>>( + object: I + ): DiskPartitionPlacementStrategy { + const message = { + ...baseDiskPartitionPlacementStrategy, + } as DiskPartitionPlacementStrategy; + message.partitions = object.partitions ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + DiskPartitionPlacementStrategy.$type, + DiskPartitionPlacementStrategy +); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -497,6 +620,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/compute/v1/disk_placement_group_service.ts b/src/generated/yandex/cloud/compute/v1/disk_placement_group_service.ts index 82f38f07..beab6b92 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_placement_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_placement_group_service.ts @@ -18,6 +18,7 @@ import { FieldMask } from "../../../../google/protobuf/field_mask"; import { DiskPlacementGroup, DiskSpreadPlacementStrategy, + DiskPartitionPlacementStrategy, } from "../../../../yandex/cloud/compute/v1/disk_placement_group"; import { Disk } from "../../../../yandex/cloud/compute/v1/disk"; import { Operation } from "../../../../yandex/cloud/operation/operation"; @@ -55,9 +56,21 @@ export interface ListDiskPlacementGroupsRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * Currently you can use filtering only on the [DiskPlacementGroup.name] field. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. 
+ * + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListDiskPlacementGroupsResponse { @@ -95,6 +108,8 @@ export interface CreateDiskPlacementGroupRequest { zoneId: string; /** Distribute disks over distinct failure domains. */ spreadPlacementStrategy?: DiskSpreadPlacementStrategy | undefined; + /** Distribute disks over partitions. */ + partitionPlacementStrategy?: DiskPartitionPlacementStrategy | undefined; } export interface CreateDiskPlacementGroupRequest_LabelsEntry { @@ -309,6 +324,7 @@ const baseListDiskPlacementGroupsRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListDiskPlacementGroupsRequest = { @@ -330,6 +346,9 @@ export const ListDiskPlacementGroupsRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -357,6 +376,9 @@ export const ListDiskPlacementGroupsRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -385,6 +407,10 @@ export const ListDiskPlacementGroupsRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? 
String(object.orderBy) + : ""; return message; }, @@ -395,6 +421,7 @@ export const ListDiskPlacementGroupsRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -408,6 +435,7 @@ export const ListDiskPlacementGroupsRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; return message; }, }; @@ -559,6 +587,12 @@ export const CreateDiskPlacementGroupRequest = { writer.uint32(50).fork() ).ldelim(); } + if (message.partitionPlacementStrategy !== undefined) { + DiskPartitionPlacementStrategy.encode( + message.partitionPlacementStrategy, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -602,6 +636,10 @@ export const CreateDiskPlacementGroupRequest = { reader.uint32() ); break; + case 7: + message.partitionPlacementStrategy = + DiskPartitionPlacementStrategy.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -641,6 +679,13 @@ export const CreateDiskPlacementGroupRequest = { object.spreadPlacementStrategy !== null ? DiskSpreadPlacementStrategy.fromJSON(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? DiskPartitionPlacementStrategy.fromJSON( + object.partitionPlacementStrategy + ) + : undefined; return message; }, @@ -661,6 +706,12 @@ export const CreateDiskPlacementGroupRequest = { (obj.spreadPlacementStrategy = message.spreadPlacementStrategy ? 
DiskSpreadPlacementStrategy.toJSON(message.spreadPlacementStrategy) : undefined); + message.partitionPlacementStrategy !== undefined && + (obj.partitionPlacementStrategy = message.partitionPlacementStrategy + ? DiskPartitionPlacementStrategy.toJSON( + message.partitionPlacementStrategy + ) + : undefined); return obj; }, @@ -689,6 +740,13 @@ export const CreateDiskPlacementGroupRequest = { object.spreadPlacementStrategy ) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? DiskPartitionPlacementStrategy.fromPartial( + object.partitionPlacementStrategy + ) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/disk_service.ts b/src/generated/yandex/cloud/compute/v1/disk_service.ts index e27323d2..f3c0b9bd 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_service.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_service.ts @@ -54,12 +54,21 @@ export interface ListDisksRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [Disk.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{,61}[a-z0-9])?`. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). 
*/ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListDisksResponse { @@ -114,7 +123,7 @@ export interface CreateDiskRequest { blockSize: number; /** Placement policy configuration. */ diskPlacementPolicy?: DiskPlacementPolicy; - /** Snapshot schedules */ + /** List of IDs of the snapshot schedules to attach the disk to. */ snapshotScheduleIds: string[]; } @@ -238,18 +247,46 @@ export interface MoveDiskMetadata { destinationFolderId: string; } +export interface RelocateDiskRequest { + $type: "yandex.cloud.compute.v1.RelocateDiskRequest"; + /** + * ID of the disk to move. + * + * To get the disk ID, make a [DiskService.List] request. + */ + diskId: string; + /** + * ID of the availability zone to move the disk to. + * + * To get the zone ID, make a [ZoneService.List] request. + */ + destinationZoneId: string; +} + +export interface RelocateDiskMetadata { + $type: "yandex.cloud.compute.v1.RelocateDiskMetadata"; + /** ID of the disk that is being moved. */ + diskId: string; + /** ID of the availability zone that the disk is being moved from. */ + sourceZoneId: string; + /** ID of the availability zone that the disk is being moved to. */ + destinationZoneId: string; +} + export interface ListDiskSnapshotSchedulesRequest { $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesRequest"; - /** ID of the Disk resource to list snapshot schedules for. */ + /** ID of the disk to list snapshot schedules for. */ diskId: string; /** * The maximum number of results per page to return. 
If the number of available - * results is larger than [page_size], the service returns a [ListDiskOperationsResponse.next_page_token] + * results is larger than `page_size`, the service returns a [ListDiskSnapshotSchedulesResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the + * Page token. To get the next page of results, set `page_token` to the * [ListDiskSnapshotSchedulesResponse.next_page_token] returned by a previous list request. */ pageToken: string; @@ -257,13 +294,14 @@ export interface ListDiskSnapshotSchedulesRequest { export interface ListDiskSnapshotSchedulesResponse { $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesResponse"; - /** List of snapshot schedules for the specified disk. */ + /** List of snapshot schedules the specified disk is attached to. */ snapshotSchedules: SnapshotSchedule[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListDiskSnapshotSchedulesRequest.page_size], use the [next_page_token] as the value - * for the [ListDiskSnapshotSchedulesRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListDiskSnapshotSchedulesRequest.page_size], use `next_page_token` as the value + * for the [ListDiskSnapshotSchedulesRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. 
*/ nextPageToken: string; } @@ -336,6 +374,7 @@ const baseListDisksRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListDisksRequest = { @@ -357,6 +396,9 @@ export const ListDisksRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -379,6 +421,9 @@ export const ListDisksRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -405,6 +450,10 @@ export const ListDisksRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; return message; }, @@ -415,6 +464,7 @@ export const ListDisksRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -426,6 +476,7 @@ export const ListDisksRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? 
""; return message; }, }; @@ -1726,6 +1777,177 @@ export const MoveDiskMetadata = { messageTypeRegistry.set(MoveDiskMetadata.$type, MoveDiskMetadata); +const baseRelocateDiskRequest: object = { + $type: "yandex.cloud.compute.v1.RelocateDiskRequest", + diskId: "", + destinationZoneId: "", +}; + +export const RelocateDiskRequest = { + $type: "yandex.cloud.compute.v1.RelocateDiskRequest" as const, + + encode( + message: RelocateDiskRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.destinationZoneId !== "") { + writer.uint32(18).string(message.destinationZoneId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RelocateDiskRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRelocateDiskRequest } as RelocateDiskRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.destinationZoneId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RelocateDiskRequest { + const message = { ...baseRelocateDiskRequest } as RelocateDiskRequest; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.destinationZoneId = + object.destinationZoneId !== undefined && + object.destinationZoneId !== null + ? 
String(object.destinationZoneId) + : ""; + return message; + }, + + toJSON(message: RelocateDiskRequest): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.destinationZoneId !== undefined && + (obj.destinationZoneId = message.destinationZoneId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RelocateDiskRequest { + const message = { ...baseRelocateDiskRequest } as RelocateDiskRequest; + message.diskId = object.diskId ?? ""; + message.destinationZoneId = object.destinationZoneId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RelocateDiskRequest.$type, RelocateDiskRequest); + +const baseRelocateDiskMetadata: object = { + $type: "yandex.cloud.compute.v1.RelocateDiskMetadata", + diskId: "", + sourceZoneId: "", + destinationZoneId: "", +}; + +export const RelocateDiskMetadata = { + $type: "yandex.cloud.compute.v1.RelocateDiskMetadata" as const, + + encode( + message: RelocateDiskMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.sourceZoneId !== "") { + writer.uint32(18).string(message.sourceZoneId); + } + if (message.destinationZoneId !== "") { + writer.uint32(26).string(message.destinationZoneId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RelocateDiskMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRelocateDiskMetadata } as RelocateDiskMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.sourceZoneId = reader.string(); + break; + case 3: + message.destinationZoneId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RelocateDiskMetadata { + const message = { ...baseRelocateDiskMetadata } as RelocateDiskMetadata; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.sourceZoneId = + object.sourceZoneId !== undefined && object.sourceZoneId !== null + ? String(object.sourceZoneId) + : ""; + message.destinationZoneId = + object.destinationZoneId !== undefined && + object.destinationZoneId !== null + ? String(object.destinationZoneId) + : ""; + return message; + }, + + toJSON(message: RelocateDiskMetadata): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.sourceZoneId !== undefined && + (obj.sourceZoneId = message.sourceZoneId); + message.destinationZoneId !== undefined && + (obj.destinationZoneId = message.destinationZoneId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RelocateDiskMetadata { + const message = { ...baseRelocateDiskMetadata } as RelocateDiskMetadata; + message.diskId = object.diskId ?? ""; + message.sourceZoneId = object.sourceZoneId ?? ""; + message.destinationZoneId = object.destinationZoneId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RelocateDiskMetadata.$type, RelocateDiskMetadata); + const baseListDiskSnapshotSchedulesRequest: object = { $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesRequest", diskId: "", @@ -2028,7 +2250,24 @@ export const DiskServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** List snapshot schedules containing the disk */ + /** + * Moves the specified disk to another availability zone + * + * Disk must be detached from instances. To move attached + * disk use [InstanceService.Relocate] request. + */ + relocate: { + path: "/yandex.cloud.compute.v1.DiskService/Relocate", + requestStream: false, + responseStream: false, + requestSerialize: (value: RelocateDiskRequest) => + Buffer.from(RelocateDiskRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RelocateDiskRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Retrieves the list of snapshot schedules the specified disk is attached to. */ listSnapshotSchedules: { path: "/yandex.cloud.compute.v1.DiskService/ListSnapshotSchedules", requestStream: false, @@ -2078,7 +2317,14 @@ export interface DiskServiceServer extends UntypedServiceImplementation { >; /** Moves the specified disk to another folder of the same cloud. */ move: handleUnaryCall; - /** List snapshot schedules containing the disk */ + /** + * Moves the specified disk to another availability zone + * + * Disk must be detached from instances. To move attached + * disk use [InstanceService.Relocate] request. + */ + relocate: handleUnaryCall; + /** Retrieves the list of snapshot schedules the specified disk is attached to. 
*/ listSnapshotSchedules: handleUnaryCall< ListDiskSnapshotSchedulesRequest, ListDiskSnapshotSchedulesResponse @@ -2223,7 +2469,28 @@ export interface DiskServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** List snapshot schedules containing the disk */ + /** + * Moves the specified disk to another availability zone + * + * Disk must be detached from instances. To move attached + * disk use [InstanceService.Relocate] request. + */ + relocate( + request: RelocateDiskRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + relocate( + request: RelocateDiskRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + relocate( + request: RelocateDiskRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Retrieves the list of snapshot schedules the specified disk is attached to. */ listSnapshotSchedules( request: ListDiskSnapshotSchedulesRequest, callback: ( diff --git a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts index e074f64c..57c931af 100644 --- a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts +++ b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts @@ -51,15 +51,22 @@ export interface ListFilesystemsRequest { */ pageToken: string; /** - * A filter expression that filters filesystems listed in the response. + * A filter expression that filters resources listed in the response. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. * - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [Filesystem.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). 
Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{,61}[a-z0-9])?`. - * Example of a filter: `name=my-filesystem`. + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListFilesystemsResponse { @@ -302,6 +309,7 @@ const baseListFilesystemsRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListFilesystemsRequest = { @@ -323,6 +331,9 @@ export const ListFilesystemsRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -348,6 +359,9 @@ export const ListFilesystemsRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -374,6 +388,10 @@ export const ListFilesystemsRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? 
String(object.orderBy) + : ""; return message; }, @@ -384,6 +402,7 @@ export const ListFilesystemsRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -395,6 +414,7 @@ export const ListFilesystemsRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/gpu_cluster.ts b/src/generated/yandex/cloud/compute/v1/gpu_cluster.ts new file mode 100644 index 00000000..2b280f8a --- /dev/null +++ b/src/generated/yandex/cloud/compute/v1/gpu_cluster.ts @@ -0,0 +1,455 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.compute.v1"; + +export enum GpuInterconnectType { + GPU_INTERCONNECT_TYPE_UNSPECIFIED = 0, + /** INFINIBAND - InfiniBand interconnect. 
*/ + INFINIBAND = 1, + UNRECOGNIZED = -1, +} + +export function gpuInterconnectTypeFromJSON(object: any): GpuInterconnectType { + switch (object) { + case 0: + case "GPU_INTERCONNECT_TYPE_UNSPECIFIED": + return GpuInterconnectType.GPU_INTERCONNECT_TYPE_UNSPECIFIED; + case 1: + case "INFINIBAND": + return GpuInterconnectType.INFINIBAND; + case -1: + case "UNRECOGNIZED": + default: + return GpuInterconnectType.UNRECOGNIZED; + } +} + +export function gpuInterconnectTypeToJSON(object: GpuInterconnectType): string { + switch (object) { + case GpuInterconnectType.GPU_INTERCONNECT_TYPE_UNSPECIFIED: + return "GPU_INTERCONNECT_TYPE_UNSPECIFIED"; + case GpuInterconnectType.INFINIBAND: + return "INFINIBAND"; + default: + return "UNKNOWN"; + } +} + +/** A GPU cluster. For details about the concept, see [documentation](/docs/compute/concepts/gpu-cluster). */ +export interface GpuCluster { + $type: "yandex.cloud.compute.v1.GpuCluster"; + /** ID of GPU cluster. */ + id: string; + /** ID of the folder that the GPU cluster belongs to. */ + folderId: string; + /** Creation timestamp. */ + createdAt?: Date; + /** + * Name of the GPU cluster. + * + * The name is unique within the folder. + */ + name: string; + /** Description of the GPU cluster. */ + description: string; + /** GPU cluster labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** Status of the GPU cluster. */ + status: GpuCluster_Status; + /** ID of the availability zone where the GPU cluster resides. */ + zoneId: string; + /** Type of interconnect used for this GPU cluster. */ + interconnectType: GpuInterconnectType; +} + +export enum GpuCluster_Status { + STATUS_UNSPECIFIED = 0, + /** CREATING - GPU cluster is being created. */ + CREATING = 1, + /** READY - GPU cluster is ready to use. */ + READY = 2, + /** ERROR - GPU cluster encountered a problem and cannot operate. */ + ERROR = 3, + /** DELETING - GPU cluster is being deleted. 
*/ + DELETING = 4, + UNRECOGNIZED = -1, +} + +export function gpuCluster_StatusFromJSON(object: any): GpuCluster_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return GpuCluster_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATING": + return GpuCluster_Status.CREATING; + case 2: + case "READY": + return GpuCluster_Status.READY; + case 3: + case "ERROR": + return GpuCluster_Status.ERROR; + case 4: + case "DELETING": + return GpuCluster_Status.DELETING; + case -1: + case "UNRECOGNIZED": + default: + return GpuCluster_Status.UNRECOGNIZED; + } +} + +export function gpuCluster_StatusToJSON(object: GpuCluster_Status): string { + switch (object) { + case GpuCluster_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case GpuCluster_Status.CREATING: + return "CREATING"; + case GpuCluster_Status.READY: + return "READY"; + case GpuCluster_Status.ERROR: + return "ERROR"; + case GpuCluster_Status.DELETING: + return "DELETING"; + default: + return "UNKNOWN"; + } +} + +export interface GpuCluster_LabelsEntry { + $type: "yandex.cloud.compute.v1.GpuCluster.LabelsEntry"; + key: string; + value: string; +} + +const baseGpuCluster: object = { + $type: "yandex.cloud.compute.v1.GpuCluster", + id: "", + folderId: "", + name: "", + description: "", + status: 0, + zoneId: "", + interconnectType: 0, +}; + +export const GpuCluster = { + $type: "yandex.cloud.compute.v1.GpuCluster" as const, + + encode( + message: GpuCluster, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + 
Object.entries(message.labels).forEach(([key, value]) => { + GpuCluster_LabelsEntry.encode( + { + $type: "yandex.cloud.compute.v1.GpuCluster.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + if (message.zoneId !== "") { + writer.uint32(66).string(message.zoneId); + } + if (message.interconnectType !== 0) { + writer.uint32(72).int32(message.interconnectType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GpuCluster { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGpuCluster } as GpuCluster; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = GpuCluster_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.status = reader.int32() as any; + break; + case 8: + message.zoneId = reader.string(); + break; + case 9: + message.interconnectType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GpuCluster { + const message = { ...baseGpuCluster } as GpuCluster; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.status = + object.status !== undefined && object.status !== null + ? gpuCluster_StatusFromJSON(object.status) + : 0; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.interconnectType = + object.interconnectType !== undefined && object.interconnectType !== null + ? gpuInterconnectTypeFromJSON(object.interconnectType) + : 0; + return message; + }, + + toJSON(message: GpuCluster): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.status !== undefined && + (obj.status = gpuCluster_StatusToJSON(message.status)); + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.interconnectType !== undefined && + (obj.interconnectType = gpuInterconnectTypeToJSON( + message.interconnectType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): GpuCluster { + const message = { ...baseGpuCluster } as GpuCluster; + message.id = object.id ?? 
""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.status = object.status ?? 0; + message.zoneId = object.zoneId ?? ""; + message.interconnectType = object.interconnectType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(GpuCluster.$type, GpuCluster); + +const baseGpuCluster_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.GpuCluster.LabelsEntry", + key: "", + value: "", +}; + +export const GpuCluster_LabelsEntry = { + $type: "yandex.cloud.compute.v1.GpuCluster.LabelsEntry" as const, + + encode( + message: GpuCluster_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GpuCluster_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGpuCluster_LabelsEntry } as GpuCluster_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GpuCluster_LabelsEntry { + const message = { ...baseGpuCluster_LabelsEntry } as GpuCluster_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? 
String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: GpuCluster_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): GpuCluster_LabelsEntry { + const message = { ...baseGpuCluster_LabelsEntry } as GpuCluster_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GpuCluster_LabelsEntry.$type, GpuCluster_LabelsEntry); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/compute/v1/gpu_cluster_service.ts b/src/generated/yandex/cloud/compute/v1/gpu_cluster_service.ts new file mode 100644 index 00000000..1bf2f5ad --- /dev/null +++ b/src/generated/yandex/cloud/compute/v1/gpu_cluster_service.ts @@ -0,0 +1,2097 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + GpuInterconnectType, + GpuCluster, + gpuInterconnectTypeFromJSON, + gpuInterconnectTypeToJSON, +} from "../../../../yandex/cloud/compute/v1/gpu_cluster"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { Instance } from "../../../../yandex/cloud/compute/v1/instance"; + +export const protobufPackage = "yandex.cloud.compute.v1"; + +export interface GetGpuClusterRequest { + $type: "yandex.cloud.compute.v1.GetGpuClusterRequest"; + /** + * ID of the GPU cluster to return. 
+ * + * To get a GPU cluster ID, make a [GpuClusterService.List] request. + */ + gpuClusterId: string; +} + +export interface ListGpuClustersRequest { + $type: "yandex.cloud.compute.v1.ListGpuClustersRequest"; + /** + * ID of the folder to list GPU clusters in. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than `page_size`, the service returns a [ListGpuClustersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListGpuClustersResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters GPU clusters listed in the response. + * + * The expression must specify: + * 1. The field name. Currently you can use filtering only on [GpuCluster.name] field. + * 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `name=my-schedule`. + */ + filter: string; + /** + * A sorting expression that sorts GPU clusters listed in the response. + * + * The expression must specify the field name from [GpuCluster] and `asc`ending or `desc`ending order, + * e.g. `createdAt desc`. + * + * Default value: `id asc`. + */ + orderBy: string; +} + +export interface ListGpuClustersResponse { + $type: "yandex.cloud.compute.v1.ListGpuClustersResponse"; + /** List of GPU clusters in the specified folder. */ + gpuClusters: GpuCluster[]; + /** + * Token for getting the next page of the list. 
If the number of results is greater than + * the specified [ListGpuClustersRequest.page_size], use `next_page_token` as the value + * for the [ListGpuClustersRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateGpuClusterRequest { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest"; + /** + * ID of the folder to create a GPU cluster in. + * + * To get a folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * Name of the GPU cluster. + * + * The name must be unique within the folder. + */ + name: string; + /** Description of the GPU cluster. */ + description: string; + /** GPU cluster labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** + * ID of the availability zone where the GPU cluster resides. + * To get a list of available zones use the [yandex.cloud.compute.v1.ZoneService.List] request. + */ + zoneId: string; + /** Type of interconnect to use for this GPU cluster. */ + interconnectType: GpuInterconnectType; +} + +export interface CreateGpuClusterRequest_LabelsEntry { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateGpuClusterMetadata { + $type: "yandex.cloud.compute.v1.CreateGpuClusterMetadata"; + /** ID of the GPU cluster that is being created. */ + gpuClusterId: string; +} + +export interface UpdateGpuClusterRequest { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest"; + /** + * ID of the GPU cluster to update. + * + * To get the GPU cluster ID, make a [GpuClusterService.List] request. + */ + gpuClusterId: string; + /** Field mask that specifies which attributes of the GPU cluster should be updated. */ + updateMask?: FieldMask; + /** + * New name for the GPU cluster. + * + * The name must be unique within the folder. 
+ */ + name: string; + /** New description of the GPU cluster. */ + description: string; + /** + * New GPU cluster labels as `key:value` pairs. + * + * Existing set of labels is completely replaced by the provided set, so if you just want + * to add or remove a label: + * 1. Get the current set of labels with a [GpuClusterService.Get] request. + * 2. Add or remove a label in this set. + * 3. Send the new set in this field. + */ + labels: { [key: string]: string }; +} + +export interface UpdateGpuClusterRequest_LabelsEntry { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateGpuClusterMetadata { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterMetadata"; + /** ID of the GPU cluster that is being updated. */ + gpuClusterId: string; +} + +export interface DeleteGpuClusterRequest { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterRequest"; + /** + * ID of the GPU cluster to delete. + * + * To get a GPU cluster ID, make a [GpuClusterService.List] request. + */ + gpuClusterId: string; +} + +export interface DeleteGpuClusterMetadata { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterMetadata"; + /** ID of the GPU cluster that is being deleted. */ + gpuClusterId: string; +} + +export interface ListGpuClusterOperationsRequest { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsRequest"; + /** + * ID of the GPU cluster to list operations for. + * + * To get a GPU cluster ID, make a [GpuClusterService.List] request. + */ + gpuClusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListGpuClusterOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the + * [ListGpuClusterOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListGpuClusterOperationsResponse { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsResponse"; + /** List of operations for the specified GPU cluster. */ + operations: Operation[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListGpuClusterOperationsRequest.page_size], use `next_page_token` as the value + * for the [ListGpuClusterOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListGpuClusterInstancesRequest { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesRequest"; + /** + * ID of the GPU cluster to list instances in. + * + * To get a GPU cluster ID, make a [GpuClusterService.List] request. + */ + gpuClusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListGpuClusterInstancesResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListGpuClusterInstancesResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * Currently you can use filtering only on the [Instance.name] field. + */ + filter: string; +} + +export interface ListGpuClusterInstancesResponse { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesResponse"; + /** List of instances in the specified GPU cluster. 
*/ + instances: Instance[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListGpuClusterInstancesRequest.page_size], use `next_page_token` as the value + * for the [ListGpuClusterInstancesRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetGpuClusterRequest: object = { + $type: "yandex.cloud.compute.v1.GetGpuClusterRequest", + gpuClusterId: "", +}; + +export const GetGpuClusterRequest = { + $type: "yandex.cloud.compute.v1.GetGpuClusterRequest" as const, + + encode( + message: GetGpuClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetGpuClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetGpuClusterRequest } as GetGpuClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetGpuClusterRequest { + const message = { ...baseGetGpuClusterRequest } as GetGpuClusterRequest; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? 
String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: GetGpuClusterRequest): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetGpuClusterRequest { + const message = { ...baseGetGpuClusterRequest } as GetGpuClusterRequest; + message.gpuClusterId = object.gpuClusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetGpuClusterRequest.$type, GetGpuClusterRequest); + +const baseListGpuClustersRequest: object = { + $type: "yandex.cloud.compute.v1.ListGpuClustersRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", + orderBy: "", +}; + +export const ListGpuClustersRequest = { + $type: "yandex.cloud.compute.v1.ListGpuClustersRequest" as const, + + encode( + message: ListGpuClustersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClustersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListGpuClustersRequest } as ListGpuClustersRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + case 5: + message.orderBy = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClustersRequest { + const message = { ...baseListGpuClustersRequest } as ListGpuClustersRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; + return message; + }, + + toJSON(message: ListGpuClustersRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGpuClustersRequest { + const message = { ...baseListGpuClustersRequest } as ListGpuClustersRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 
0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGpuClustersRequest.$type, ListGpuClustersRequest); + +const baseListGpuClustersResponse: object = { + $type: "yandex.cloud.compute.v1.ListGpuClustersResponse", + nextPageToken: "", +}; + +export const ListGpuClustersResponse = { + $type: "yandex.cloud.compute.v1.ListGpuClustersResponse" as const, + + encode( + message: ListGpuClustersResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.gpuClusters) { + GpuCluster.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClustersResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGpuClustersResponse, + } as ListGpuClustersResponse; + message.gpuClusters = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusters.push(GpuCluster.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClustersResponse { + const message = { + ...baseListGpuClustersResponse, + } as ListGpuClustersResponse; + message.gpuClusters = (object.gpuClusters ?? []).map((e: any) => + GpuCluster.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? 
String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGpuClustersResponse): unknown { + const obj: any = {}; + if (message.gpuClusters) { + obj.gpuClusters = message.gpuClusters.map((e) => + e ? GpuCluster.toJSON(e) : undefined + ); + } else { + obj.gpuClusters = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGpuClustersResponse { + const message = { + ...baseListGpuClustersResponse, + } as ListGpuClustersResponse; + message.gpuClusters = + object.gpuClusters?.map((e) => GpuCluster.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGpuClustersResponse.$type, ListGpuClustersResponse); + +const baseCreateGpuClusterRequest: object = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest", + folderId: "", + name: "", + description: "", + zoneId: "", + interconnectType: 0, +}; + +export const CreateGpuClusterRequest = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest" as const, + + encode( + message: CreateGpuClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateGpuClusterRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.zoneId !== "") { + writer.uint32(42).string(message.zoneId); + } + if (message.interconnectType !== 0) { + writer.uint32(48).int32(message.interconnectType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: 
number + ): CreateGpuClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateGpuClusterRequest, + } as CreateGpuClusterRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateGpuClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.zoneId = reader.string(); + break; + case 6: + message.interconnectType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGpuClusterRequest { + const message = { + ...baseCreateGpuClusterRequest, + } as CreateGpuClusterRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.interconnectType = + object.interconnectType !== undefined && object.interconnectType !== null + ? 
gpuInterconnectTypeFromJSON(object.interconnectType) + : 0; + return message; + }, + + toJSON(message: CreateGpuClusterRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.interconnectType !== undefined && + (obj.interconnectType = gpuInterconnectTypeToJSON( + message.interconnectType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGpuClusterRequest { + const message = { + ...baseCreateGpuClusterRequest, + } as CreateGpuClusterRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.zoneId = object.zoneId ?? ""; + message.interconnectType = object.interconnectType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(CreateGpuClusterRequest.$type, CreateGpuClusterRequest); + +const baseCreateGpuClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateGpuClusterRequest_LabelsEntry = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterRequest.LabelsEntry" as const, + + encode( + message: CreateGpuClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGpuClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateGpuClusterRequest_LabelsEntry, + } as CreateGpuClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGpuClusterRequest_LabelsEntry { + const message = { + ...baseCreateGpuClusterRequest_LabelsEntry, + } as CreateGpuClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateGpuClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateGpuClusterRequest_LabelsEntry { + const message = { + ...baseCreateGpuClusterRequest_LabelsEntry, + } as CreateGpuClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateGpuClusterRequest_LabelsEntry.$type, + CreateGpuClusterRequest_LabelsEntry +); + +const baseCreateGpuClusterMetadata: object = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterMetadata", + gpuClusterId: "", +}; + +export const CreateGpuClusterMetadata = { + $type: "yandex.cloud.compute.v1.CreateGpuClusterMetadata" as const, + + encode( + message: CreateGpuClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGpuClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateGpuClusterMetadata, + } as CreateGpuClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGpuClusterMetadata { + const message = { + ...baseCreateGpuClusterMetadata, + } as CreateGpuClusterMetadata; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? 
String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: CreateGpuClusterMetadata): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGpuClusterMetadata { + const message = { + ...baseCreateGpuClusterMetadata, + } as CreateGpuClusterMetadata; + message.gpuClusterId = object.gpuClusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateGpuClusterMetadata.$type, + CreateGpuClusterMetadata +); + +const baseUpdateGpuClusterRequest: object = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest", + gpuClusterId: "", + name: "", + description: "", +}; + +export const UpdateGpuClusterRequest = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest" as const, + + encode( + message: UpdateGpuClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateGpuClusterRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGpuClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGpuClusterRequest, + } as UpdateGpuClusterRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateGpuClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGpuClusterRequest { + const message = { + ...baseUpdateGpuClusterRequest, + } as UpdateGpuClusterRequest; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: UpdateGpuClusterRequest): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGpuClusterRequest { + const message = { + ...baseUpdateGpuClusterRequest, + } as UpdateGpuClusterRequest; + message.gpuClusterId = object.gpuClusterId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(UpdateGpuClusterRequest.$type, UpdateGpuClusterRequest); + +const baseUpdateGpuClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateGpuClusterRequest_LabelsEntry = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterRequest.LabelsEntry" as const, + + encode( + message: UpdateGpuClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGpuClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGpuClusterRequest_LabelsEntry, + } as UpdateGpuClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGpuClusterRequest_LabelsEntry { + const message = { + ...baseUpdateGpuClusterRequest_LabelsEntry, + } as UpdateGpuClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateGpuClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateGpuClusterRequest_LabelsEntry { + const message = { + ...baseUpdateGpuClusterRequest_LabelsEntry, + } as UpdateGpuClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGpuClusterRequest_LabelsEntry.$type, + UpdateGpuClusterRequest_LabelsEntry +); + +const baseUpdateGpuClusterMetadata: object = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterMetadata", + gpuClusterId: "", +}; + +export const UpdateGpuClusterMetadata = { + $type: "yandex.cloud.compute.v1.UpdateGpuClusterMetadata" as const, + + encode( + message: UpdateGpuClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGpuClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateGpuClusterMetadata, + } as UpdateGpuClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGpuClusterMetadata { + const message = { + ...baseUpdateGpuClusterMetadata, + } as UpdateGpuClusterMetadata; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: UpdateGpuClusterMetadata): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGpuClusterMetadata { + const message = { + ...baseUpdateGpuClusterMetadata, + } as UpdateGpuClusterMetadata; + message.gpuClusterId = object.gpuClusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGpuClusterMetadata.$type, + UpdateGpuClusterMetadata +); + +const baseDeleteGpuClusterRequest: object = { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterRequest", + gpuClusterId: "", +}; + +export const DeleteGpuClusterRequest = { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterRequest" as const, + + encode( + message: DeleteGpuClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGpuClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteGpuClusterRequest, + } as DeleteGpuClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGpuClusterRequest { + const message = { + ...baseDeleteGpuClusterRequest, + } as DeleteGpuClusterRequest; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: DeleteGpuClusterRequest): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGpuClusterRequest { + const message = { + ...baseDeleteGpuClusterRequest, + } as DeleteGpuClusterRequest; + message.gpuClusterId = object.gpuClusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteGpuClusterRequest.$type, DeleteGpuClusterRequest); + +const baseDeleteGpuClusterMetadata: object = { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterMetadata", + gpuClusterId: "", +}; + +export const DeleteGpuClusterMetadata = { + $type: "yandex.cloud.compute.v1.DeleteGpuClusterMetadata" as const, + + encode( + message: DeleteGpuClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGpuClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteGpuClusterMetadata, + } as DeleteGpuClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGpuClusterMetadata { + const message = { + ...baseDeleteGpuClusterMetadata, + } as DeleteGpuClusterMetadata; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: DeleteGpuClusterMetadata): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGpuClusterMetadata { + const message = { + ...baseDeleteGpuClusterMetadata, + } as DeleteGpuClusterMetadata; + message.gpuClusterId = object.gpuClusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteGpuClusterMetadata.$type, + DeleteGpuClusterMetadata +); + +const baseListGpuClusterOperationsRequest: object = { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsRequest", + gpuClusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListGpuClusterOperationsRequest = { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsRequest" as const, + + encode( + message: ListGpuClusterOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClusterOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGpuClusterOperationsRequest, + } as ListGpuClusterOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClusterOperationsRequest { + const message = { + ...baseListGpuClusterOperationsRequest, + } as ListGpuClusterOperationsRequest; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? 
Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListGpuClusterOperationsRequest): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGpuClusterOperationsRequest { + const message = { + ...baseListGpuClusterOperationsRequest, + } as ListGpuClusterOperationsRequest; + message.gpuClusterId = object.gpuClusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGpuClusterOperationsRequest.$type, + ListGpuClusterOperationsRequest +); + +const baseListGpuClusterOperationsResponse: object = { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsResponse", + nextPageToken: "", +}; + +export const ListGpuClusterOperationsResponse = { + $type: "yandex.cloud.compute.v1.ListGpuClusterOperationsResponse" as const, + + encode( + message: ListGpuClusterOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClusterOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListGpuClusterOperationsResponse, + } as ListGpuClusterOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClusterOperationsResponse { + const message = { + ...baseListGpuClusterOperationsResponse, + } as ListGpuClusterOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGpuClusterOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListGpuClusterOperationsResponse { + const message = { + ...baseListGpuClusterOperationsResponse, + } as ListGpuClusterOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGpuClusterOperationsResponse.$type, + ListGpuClusterOperationsResponse +); + +const baseListGpuClusterInstancesRequest: object = { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesRequest", + gpuClusterId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListGpuClusterInstancesRequest = { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesRequest" as const, + + encode( + message: ListGpuClusterInstancesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClusterInstancesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGpuClusterInstancesRequest, + } as ListGpuClusterInstancesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClusterInstancesRequest { + const message = { + ...baseListGpuClusterInstancesRequest, + } as ListGpuClusterInstancesRequest; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? 
String(object.gpuClusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListGpuClusterInstancesRequest): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGpuClusterInstancesRequest { + const message = { + ...baseListGpuClusterInstancesRequest, + } as ListGpuClusterInstancesRequest; + message.gpuClusterId = object.gpuClusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGpuClusterInstancesRequest.$type, + ListGpuClusterInstancesRequest +); + +const baseListGpuClusterInstancesResponse: object = { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesResponse", + nextPageToken: "", +}; + +export const ListGpuClusterInstancesResponse = { + $type: "yandex.cloud.compute.v1.ListGpuClusterInstancesResponse" as const, + + encode( + message: ListGpuClusterInstancesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.instances) { + Instance.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGpuClusterInstancesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGpuClusterInstancesResponse, + } as ListGpuClusterInstancesResponse; + message.instances = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instances.push(Instance.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGpuClusterInstancesResponse { + const message = { + ...baseListGpuClusterInstancesResponse, + } as ListGpuClusterInstancesResponse; + message.instances = (object.instances ?? []).map((e: any) => + Instance.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? 
String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGpuClusterInstancesResponse): unknown { + const obj: any = {}; + if (message.instances) { + obj.instances = message.instances.map((e) => + e ? Instance.toJSON(e) : undefined + ); + } else { + obj.instances = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGpuClusterInstancesResponse { + const message = { + ...baseListGpuClusterInstancesResponse, + } as ListGpuClusterInstancesResponse; + message.instances = + object.instances?.map((e) => Instance.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGpuClusterInstancesResponse.$type, + ListGpuClusterInstancesResponse +); + +/** A set of methods for managing GPU clusters. */ +export const GpuClusterServiceService = { + /** + * Returns the specified GPU cluster. + * + * To get the list of available GPU clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.compute.v1.GpuClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetGpuClusterRequest) => + Buffer.from(GetGpuClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetGpuClusterRequest.decode(value), + responseSerialize: (value: GpuCluster) => + Buffer.from(GpuCluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => GpuCluster.decode(value), + }, + /** Retrieves the list of GPU clusters in the specified folder. 
*/ + list: { + path: "/yandex.cloud.compute.v1.GpuClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGpuClustersRequest) => + Buffer.from(ListGpuClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListGpuClustersRequest.decode(value), + responseSerialize: (value: ListGpuClustersResponse) => + Buffer.from(ListGpuClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGpuClustersResponse.decode(value), + }, + /** Creates a GPU cluster in the specified folder. */ + create: { + path: "/yandex.cloud.compute.v1.GpuClusterService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateGpuClusterRequest) => + Buffer.from(CreateGpuClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateGpuClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Updates the specified GPU cluster. + * + * Currently only name, description and labels can be updated. + */ + update: { + path: "/yandex.cloud.compute.v1.GpuClusterService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGpuClusterRequest) => + Buffer.from(UpdateGpuClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateGpuClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Deletes the specified GPU cluster. + * + * GPU cluster can be deleted only if it doesn't have any instances associated with it. 
+ */ + delete: { + path: "/yandex.cloud.compute.v1.GpuClusterService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteGpuClusterRequest) => + Buffer.from(DeleteGpuClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteGpuClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified GPU cluster. */ + listOperations: { + path: "/yandex.cloud.compute.v1.GpuClusterService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGpuClusterOperationsRequest) => + Buffer.from(ListGpuClusterOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGpuClusterOperationsRequest.decode(value), + responseSerialize: (value: ListGpuClusterOperationsResponse) => + Buffer.from(ListGpuClusterOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGpuClusterOperationsResponse.decode(value), + }, + /** List instances created in this GPU cluster. */ + listInstances: { + path: "/yandex.cloud.compute.v1.GpuClusterService/ListInstances", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGpuClusterInstancesRequest) => + Buffer.from(ListGpuClusterInstancesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGpuClusterInstancesRequest.decode(value), + responseSerialize: (value: ListGpuClusterInstancesResponse) => + Buffer.from(ListGpuClusterInstancesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGpuClusterInstancesResponse.decode(value), + }, +} as const; + +export interface GpuClusterServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified GPU cluster. 
+ * + * To get the list of available GPU clusters, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of GPU clusters in the specified folder. */ + list: handleUnaryCall; + /** Creates a GPU cluster in the specified folder. */ + create: handleUnaryCall; + /** + * Updates the specified GPU cluster. + * + * Currently only name, description and labels can be updated. + */ + update: handleUnaryCall; + /** + * Deletes the specified GPU cluster. + * + * GPU cluster can be deleted only if it doesn't have any instances associated with it. + */ + delete: handleUnaryCall; + /** Lists operations for the specified GPU cluster. */ + listOperations: handleUnaryCall< + ListGpuClusterOperationsRequest, + ListGpuClusterOperationsResponse + >; + /** List instances created in this GPU cluster. */ + listInstances: handleUnaryCall< + ListGpuClusterInstancesRequest, + ListGpuClusterInstancesResponse + >; +} + +export interface GpuClusterServiceClient extends Client { + /** + * Returns the specified GPU cluster. + * + * To get the list of available GPU clusters, make a [List] request. + */ + get( + request: GetGpuClusterRequest, + callback: (error: ServiceError | null, response: GpuCluster) => void + ): ClientUnaryCall; + get( + request: GetGpuClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: GpuCluster) => void + ): ClientUnaryCall; + get( + request: GetGpuClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: GpuCluster) => void + ): ClientUnaryCall; + /** Retrieves the list of GPU clusters in the specified folder. 
*/ + list( + request: ListGpuClustersRequest, + callback: ( + error: ServiceError | null, + response: ListGpuClustersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListGpuClustersRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGpuClustersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListGpuClustersRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGpuClustersResponse + ) => void + ): ClientUnaryCall; + /** Creates a GPU cluster in the specified folder. */ + create( + request: CreateGpuClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGpuClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGpuClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Updates the specified GPU cluster. + * + * Currently only name, description and labels can be updated. + */ + update( + request: UpdateGpuClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGpuClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGpuClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Deletes the specified GPU cluster. + * + * GPU cluster can be deleted only if it doesn't have any instances associated with it. 
+ */ + delete( + request: DeleteGpuClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGpuClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGpuClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified GPU cluster. */ + listOperations( + request: ListGpuClusterOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListGpuClusterOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGpuClusterOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGpuClusterOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGpuClusterOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGpuClusterOperationsResponse + ) => void + ): ClientUnaryCall; + /** List instances created in this GPU cluster. 
*/ + listInstances( + request: ListGpuClusterInstancesRequest, + callback: ( + error: ServiceError | null, + response: ListGpuClusterInstancesResponse + ) => void + ): ClientUnaryCall; + listInstances( + request: ListGpuClusterInstancesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGpuClusterInstancesResponse + ) => void + ): ClientUnaryCall; + listInstances( + request: ListGpuClusterInstancesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGpuClusterInstancesResponse + ) => void + ): ClientUnaryCall; +} + +export const GpuClusterServiceClient = makeGenericClientConstructor( + GpuClusterServiceService, + "yandex.cloud.compute.v1.GpuClusterService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): GpuClusterServiceClient; + service: typeof GpuClusterServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/compute/v1/host_group_service.ts b/src/generated/yandex/cloud/compute/v1/host_group_service.ts index 5e9bf262..0b22cbce 100644 --- a/src/generated/yandex/cloud/compute/v1/host_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/host_group_service.ts @@ -59,9 +59,21 @@ export interface ListHostGroupsRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * Currently you can use filtering only on the [HostGroup.name] field. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListHostGroupsResponse { @@ -186,11 +198,7 @@ export interface ListHostGroupInstancesRequest { * returned by a previous list request. */ pageToken: string; - /** - * A filter expression that filters resources listed in the response. - * Currently you can use filtering only on the [Host.id] field. 
- * To get the host ID, use [HostGroupService.ListHosts] request. - */ + /** Filter support is not currently implemented. Any filters are ignored. */ filter: string; } @@ -348,6 +356,7 @@ const baseListHostGroupsRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListHostGroupsRequest = { @@ -369,6 +378,9 @@ export const ListHostGroupsRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -394,6 +406,9 @@ export const ListHostGroupsRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -420,6 +435,10 @@ export const ListHostGroupsRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; return message; }, @@ -430,6 +449,7 @@ export const ListHostGroupsRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -441,6 +461,7 @@ export const ListHostGroupsRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? 
""; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/image_service.ts b/src/generated/yandex/cloud/compute/v1/image_service.ts index cb9b4e8e..17c933ca 100644 --- a/src/generated/yandex/cloud/compute/v1/image_service.ts +++ b/src/generated/yandex/cloud/compute/v1/image_service.ts @@ -61,12 +61,21 @@ export interface ListImagesRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [Image.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{,61}[a-z0-9])?`. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListImagesResponse { @@ -128,7 +137,7 @@ export interface CreateImageRequest { /** * URI of the source image to create the new image from. * Currently only supports links to images that are stored in Object Storage. - * Currently only supports Qcow2, VMDK, and VHD formats. + * Currently only supports Qcow2, VMDK, and RAW formats. 
*/ uri: string | undefined; /** @@ -391,6 +400,7 @@ const baseListImagesRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListImagesRequest = { @@ -412,6 +422,9 @@ export const ListImagesRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -434,6 +447,9 @@ export const ListImagesRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -460,6 +476,10 @@ export const ListImagesRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; return message; }, @@ -470,6 +490,7 @@ export const ListImagesRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -481,6 +502,7 @@ export const ListImagesRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/instance.ts b/src/generated/yandex/cloud/compute/v1/instance.ts index 11632e4d..31188be7 100644 --- a/src/generated/yandex/cloud/compute/v1/instance.ts +++ b/src/generated/yandex/cloud/compute/v1/instance.ts @@ -127,6 +127,8 @@ export interface Instance { filesystems: AttachedFilesystem[]; /** Array of network interfaces that are attached to the instance. */ networkInterfaces: NetworkInterface[]; + /** GPU settings */ + gpuSettings?: GpuSettings; /** * A domain name of the instance. 
FQDN is defined by the server * in the format `..internal` when the instance is created. @@ -144,6 +146,10 @@ export interface Instance { networkSettings?: NetworkSettings; /** Placement policy configuration. */ placementPolicy?: PlacementPolicy; + /** ID of the dedicated host group that the instance belongs to. */ + hostGroupId: string; + /** ID of the dedicated host that the instance belongs to. */ + hostId: string; } export enum Instance_Status { @@ -515,12 +521,20 @@ export function networkSettings_TypeToJSON( } } +export interface GpuSettings { + $type: "yandex.cloud.compute.v1.GpuSettings"; + /** Attach instance to specified GPU cluster. */ + gpuClusterId: string; +} + export interface PlacementPolicy { $type: "yandex.cloud.compute.v1.PlacementPolicy"; /** Placement group ID. */ placementGroupId: string; /** List of affinity rules. Scheduler will attempt to allocate instances according to order of rules. */ hostAffinityRules: PlacementPolicy_HostAffinityRule[]; + /** Placement group partition */ + placementGroupPartition: number; } /** Affinity definition */ @@ -599,6 +613,8 @@ const baseInstance: object = { status: 0, fqdn: "", serviceAccountId: "", + hostGroupId: "", + hostId: "", }; export const Instance = { @@ -679,6 +695,12 @@ export const Instance = { for (const v of message.networkInterfaces) { NetworkInterface.encode(v!, writer.uint32(114).fork()).ldelim(); } + if (message.gpuSettings !== undefined) { + GpuSettings.encode( + message.gpuSettings, + writer.uint32(210).fork() + ).ldelim(); + } if (message.fqdn !== "") { writer.uint32(130).string(message.fqdn); } @@ -703,6 +725,12 @@ export const Instance = { writer.uint32(162).fork() ).ldelim(); } + if (message.hostGroupId !== "") { + writer.uint32(218).string(message.hostGroupId); + } + if (message.hostId !== "") { + writer.uint32(226).string(message.hostId); + } return writer; }, @@ -792,6 +820,9 @@ export const Instance = { NetworkInterface.decode(reader, reader.uint32()) ); break; + case 26: + 
message.gpuSettings = GpuSettings.decode(reader, reader.uint32()); + break; case 16: message.fqdn = reader.string(); break; @@ -816,6 +847,12 @@ export const Instance = { reader.uint32() ); break; + case 27: + message.hostGroupId = reader.string(); + break; + case 28: + message.hostId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -892,6 +929,10 @@ export const Instance = { message.networkInterfaces = (object.networkInterfaces ?? []).map((e: any) => NetworkInterface.fromJSON(e) ); + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? GpuSettings.fromJSON(object.gpuSettings) + : undefined; message.fqdn = object.fqdn !== undefined && object.fqdn !== null ? String(object.fqdn) @@ -912,6 +953,14 @@ export const Instance = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromJSON(object.placementPolicy) : undefined; + message.hostGroupId = + object.hostGroupId !== undefined && object.hostGroupId !== null + ? String(object.hostGroupId) + : ""; + message.hostId = + object.hostId !== undefined && object.hostId !== null + ? String(object.hostId) + : ""; return message; }, @@ -980,6 +1029,10 @@ export const Instance = { } else { obj.networkInterfaces = []; } + message.gpuSettings !== undefined && + (obj.gpuSettings = message.gpuSettings + ? GpuSettings.toJSON(message.gpuSettings) + : undefined); message.fqdn !== undefined && (obj.fqdn = message.fqdn); message.schedulingPolicy !== undefined && (obj.schedulingPolicy = message.schedulingPolicy @@ -995,6 +1048,9 @@ export const Instance = { (obj.placementPolicy = message.placementPolicy ? 
PlacementPolicy.toJSON(message.placementPolicy) : undefined); + message.hostGroupId !== undefined && + (obj.hostGroupId = message.hostGroupId); + message.hostId !== undefined && (obj.hostId = message.hostId); return obj; }, @@ -1045,6 +1101,10 @@ export const Instance = { message.networkInterfaces = object.networkInterfaces?.map((e) => NetworkInterface.fromPartial(e)) || []; + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? GpuSettings.fromPartial(object.gpuSettings) + : undefined; message.fqdn = object.fqdn ?? ""; message.schedulingPolicy = object.schedulingPolicy !== undefined && object.schedulingPolicy !== null @@ -1059,6 +1119,8 @@ export const Instance = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromPartial(object.placementPolicy) : undefined; + message.hostGroupId = object.hostGroupId ?? ""; + message.hostId = object.hostId ?? ""; return message; }, }; @@ -2161,9 +2223,73 @@ export const NetworkSettings = { messageTypeRegistry.set(NetworkSettings.$type, NetworkSettings); +const baseGpuSettings: object = { + $type: "yandex.cloud.compute.v1.GpuSettings", + gpuClusterId: "", +}; + +export const GpuSettings = { + $type: "yandex.cloud.compute.v1.GpuSettings" as const, + + encode( + message: GpuSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GpuSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGpuSettings } as GpuSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GpuSettings { + const message = { ...baseGpuSettings } as GpuSettings; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + return message; + }, + + toJSON(message: GpuSettings): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GpuSettings { + const message = { ...baseGpuSettings } as GpuSettings; + message.gpuClusterId = object.gpuClusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GpuSettings.$type, GpuSettings); + const basePlacementPolicy: object = { $type: "yandex.cloud.compute.v1.PlacementPolicy", placementGroupId: "", + placementGroupPartition: 0, }; export const PlacementPolicy = { @@ -2182,6 +2308,9 @@ export const PlacementPolicy = { writer.uint32(18).fork() ).ldelim(); } + if (message.placementGroupPartition !== 0) { + writer.uint32(24).int64(message.placementGroupPartition); + } return writer; }, @@ -2201,6 +2330,11 @@ export const PlacementPolicy = { PlacementPolicy_HostAffinityRule.decode(reader, reader.uint32()) ); break; + case 3: + message.placementGroupPartition = longToNumber( + reader.int64() as Long + ); + break; default: reader.skipType(tag & 7); break; @@ -2218,6 +2352,11 @@ export const PlacementPolicy = { message.hostAffinityRules = (object.hostAffinityRules ?? []).map((e: any) => PlacementPolicy_HostAffinityRule.fromJSON(e) ); + message.placementGroupPartition = + object.placementGroupPartition !== undefined && + object.placementGroupPartition !== null + ? 
Number(object.placementGroupPartition) + : 0; return message; }, @@ -2232,6 +2371,10 @@ export const PlacementPolicy = { } else { obj.hostAffinityRules = []; } + message.placementGroupPartition !== undefined && + (obj.placementGroupPartition = Math.round( + message.placementGroupPartition + )); return obj; }, @@ -2244,6 +2387,7 @@ export const PlacementPolicy = { object.hostAffinityRules?.map((e) => PlacementPolicy_HostAffinityRule.fromPartial(e) ) || []; + message.placementGroupPartition = object.placementGroupPartition ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/instance_service.ts b/src/generated/yandex/cloud/compute/v1/instance_service.ts index 9267821d..ce627eb3 100644 --- a/src/generated/yandex/cloud/compute/v1/instance_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instance_service.ts @@ -18,6 +18,7 @@ import { MetadataOptions, SchedulingPolicy, NetworkSettings, + GpuSettings, PlacementPolicy, IpVersion, Instance, @@ -27,6 +28,12 @@ import { import { FieldMask } from "../../../../google/protobuf/field_mask"; import { DiskPlacementPolicy } from "../../../../yandex/cloud/compute/v1/disk"; import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; export const protobufPackage = "yandex.cloud.compute.v1"; @@ -97,12 +104,21 @@ export interface ListInstancesRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [Instance.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{,61}[a-z0-9])?`. 
+ * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListInstancesResponse { @@ -203,6 +219,8 @@ export interface CreateInstanceRequest { serviceAccountId: string; /** Network settings. */ networkSettings?: NetworkSettings; + /** GPU settings. */ + gpuSettings?: GpuSettings; /** Placement policy configuration. */ placementPolicy?: PlacementPolicy; } @@ -830,6 +848,32 @@ export interface MoveInstanceMetadata { destinationFolderId: string; } +export interface RelocateInstanceRequest { + $type: "yandex.cloud.compute.v1.RelocateInstanceRequest"; + /** + * ID of the instance to move. + * + * To get the instance ID, make a [InstanceService.List] request. + */ + instanceId: string; + /** + * ID of the availability zone to move the instance to. + * + * To get the zone ID, make a [ZoneService.List] request. + */ + destinationZoneId: string; +} + +export interface RelocateInstanceMetadata { + $type: "yandex.cloud.compute.v1.RelocateInstanceMetadata"; + /** ID of the instance that is being moved. */ + instanceId: string; + /** ID of the availability zone that the instance is being moved from. */ + sourceZoneId: string; + /** ID of the availability zone that the instance is being moved to. 
*/ + destinationZoneId: string; +} + export interface GuestStopInstanceMetadata { $type: "yandex.cloud.compute.v1.GuestStopInstanceMetadata"; /** ID of the instance that was stopped from guest OS. */ @@ -929,6 +973,7 @@ const baseListInstancesRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListInstancesRequest = { @@ -950,6 +995,9 @@ export const ListInstancesRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -975,6 +1023,9 @@ export const ListInstancesRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1001,6 +1052,10 @@ export const ListInstancesRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; return message; }, @@ -1011,6 +1066,7 @@ export const ListInstancesRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -1022,6 +1078,7 @@ export const ListInstancesRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? 
""; return message; }, }; @@ -1214,6 +1271,12 @@ export const CreateInstanceRequest = { writer.uint32(122).fork() ).ldelim(); } + if (message.gpuSettings !== undefined) { + GpuSettings.encode( + message.gpuSettings, + writer.uint32(162).fork() + ).ldelim(); + } if (message.placementPolicy !== undefined) { PlacementPolicy.encode( message.placementPolicy, @@ -1325,6 +1388,9 @@ export const CreateInstanceRequest = { reader.uint32() ); break; + case 20: + message.gpuSettings = GpuSettings.decode(reader, reader.uint32()); + break; case 16: message.placementPolicy = PlacementPolicy.decode( reader, @@ -1413,6 +1479,10 @@ export const CreateInstanceRequest = { object.networkSettings !== undefined && object.networkSettings !== null ? NetworkSettings.fromJSON(object.networkSettings) : undefined; + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? GpuSettings.fromJSON(object.gpuSettings) + : undefined; message.placementPolicy = object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromJSON(object.placementPolicy) @@ -1491,6 +1561,10 @@ export const CreateInstanceRequest = { (obj.networkSettings = message.networkSettings ? NetworkSettings.toJSON(message.networkSettings) : undefined); + message.gpuSettings !== undefined && + (obj.gpuSettings = message.gpuSettings + ? GpuSettings.toJSON(message.gpuSettings) + : undefined); message.placementPolicy !== undefined && (obj.placementPolicy = message.placementPolicy ? PlacementPolicy.toJSON(message.placementPolicy) @@ -1559,6 +1633,10 @@ export const CreateInstanceRequest = { object.networkSettings !== undefined && object.networkSettings !== null ? NetworkSettings.fromPartial(object.networkSettings) : undefined; + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? 
GpuSettings.fromPartial(object.gpuSettings) + : undefined; message.placementPolicy = object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromPartial(object.placementPolicy) @@ -6116,6 +6194,195 @@ export const MoveInstanceMetadata = { messageTypeRegistry.set(MoveInstanceMetadata.$type, MoveInstanceMetadata); +const baseRelocateInstanceRequest: object = { + $type: "yandex.cloud.compute.v1.RelocateInstanceRequest", + instanceId: "", + destinationZoneId: "", +}; + +export const RelocateInstanceRequest = { + $type: "yandex.cloud.compute.v1.RelocateInstanceRequest" as const, + + encode( + message: RelocateInstanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.destinationZoneId !== "") { + writer.uint32(18).string(message.destinationZoneId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RelocateInstanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRelocateInstanceRequest, + } as RelocateInstanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.destinationZoneId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RelocateInstanceRequest { + const message = { + ...baseRelocateInstanceRequest, + } as RelocateInstanceRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.destinationZoneId = + object.destinationZoneId !== undefined && + object.destinationZoneId !== null + ? 
String(object.destinationZoneId) + : ""; + return message; + }, + + toJSON(message: RelocateInstanceRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.destinationZoneId !== undefined && + (obj.destinationZoneId = message.destinationZoneId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RelocateInstanceRequest { + const message = { + ...baseRelocateInstanceRequest, + } as RelocateInstanceRequest; + message.instanceId = object.instanceId ?? ""; + message.destinationZoneId = object.destinationZoneId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RelocateInstanceRequest.$type, RelocateInstanceRequest); + +const baseRelocateInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.RelocateInstanceMetadata", + instanceId: "", + sourceZoneId: "", + destinationZoneId: "", +}; + +export const RelocateInstanceMetadata = { + $type: "yandex.cloud.compute.v1.RelocateInstanceMetadata" as const, + + encode( + message: RelocateInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.sourceZoneId !== "") { + writer.uint32(18).string(message.sourceZoneId); + } + if (message.destinationZoneId !== "") { + writer.uint32(26).string(message.destinationZoneId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RelocateInstanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRelocateInstanceMetadata, + } as RelocateInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.sourceZoneId = reader.string(); + break; + case 3: + message.destinationZoneId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RelocateInstanceMetadata { + const message = { + ...baseRelocateInstanceMetadata, + } as RelocateInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.sourceZoneId = + object.sourceZoneId !== undefined && object.sourceZoneId !== null + ? String(object.sourceZoneId) + : ""; + message.destinationZoneId = + object.destinationZoneId !== undefined && + object.destinationZoneId !== null + ? String(object.destinationZoneId) + : ""; + return message; + }, + + toJSON(message: RelocateInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.sourceZoneId !== undefined && + (obj.sourceZoneId = message.sourceZoneId); + message.destinationZoneId !== undefined && + (obj.destinationZoneId = message.destinationZoneId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RelocateInstanceMetadata { + const message = { + ...baseRelocateInstanceMetadata, + } as RelocateInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + message.sourceZoneId = object.sourceZoneId ?? ""; + message.destinationZoneId = object.destinationZoneId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + RelocateInstanceMetadata.$type, + RelocateInstanceMetadata +); + const baseGuestStopInstanceMetadata: object = { $type: "yandex.cloud.compute.v1.GuestStopInstanceMetadata", instanceId: "", @@ -6603,6 +6870,63 @@ export const InstanceServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** + * Moves the specified instance to another availability zone + * + * Running instance will be restarted during this operation. + */ + relocate: { + path: "/yandex.cloud.compute.v1.InstanceService/Relocate", + requestStream: false, + responseStream: false, + requestSerialize: (value: RelocateInstanceRequest) => + Buffer.from(RelocateInstanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + RelocateInstanceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists access bindings for the instance. */ + listAccessBindings: { + path: "/yandex.cloud.compute.v1.InstanceService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for the instance. 
*/ + setAccessBindings: { + path: "/yandex.cloud.compute.v1.InstanceService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for the instance. */ + updateAccessBindings: { + path: "/yandex.cloud.compute.v1.InstanceService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface InstanceServiceServer extends UntypedServiceImplementation { @@ -6691,6 +7015,21 @@ export interface InstanceServiceServer extends UntypedServiceImplementation { * that have been recorded to the source folder prior to moving will be retained. */ move: handleUnaryCall; + /** + * Moves the specified instance to another availability zone + * + * Running instance will be restarted during this operation. + */ + relocate: handleUnaryCall; + /** Lists access bindings for the instance. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for the instance. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for the instance. 
*/ + updateAccessBindings: handleUnaryCall; } export interface InstanceServiceClient extends Client { @@ -7060,6 +7399,83 @@ export interface InstanceServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** + * Moves the specified instance to another availability zone + * + * Running instance will be restarted during this operation. + */ + relocate( + request: RelocateInstanceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + relocate( + request: RelocateInstanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + relocate( + request: RelocateInstanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists access bindings for the instance. */ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the instance. 
*/ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the instance. */ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const InstanceServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts index c0dee838..4b72c9e0 100644 --- a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts +++ b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts @@ -618,6 +618,13 @@ export interface AllocationPolicy_Zone { $type: "yandex.cloud.compute.v1.instancegroup.AllocationPolicy.Zone"; /** ID of the availability zone where the instance resides. */ zoneId: string; + /** + * Each instance in a zone will be associated with exactly one of a tag from a pool below. + * All specified tags must be unique across the whole group not only the zone. 
+ * It is guaranteed that during whole deploy only tags from prefix of the specified list will be used. + * It is possible to use tag associated with instance in templating via {instance.tag}. + */ + instanceTagsPool: string[]; } export interface InstanceTemplate { @@ -696,6 +703,15 @@ export interface InstanceTemplate { hostname: string; /** Placement Group */ placementPolicy?: PlacementPolicy; + /** + * Array of filesystems to attach to the instance. + * + * The filesystems must reside in the same availability zone as the instance. + * + * To use the instance with an attached filesystem, the latter must be mounted. + * For details, see [documentation](/docs/compute/operations/filesystem/attach-to-vm). + */ + filesystemSpecs: AttachedFilesystemSpec[]; } export interface InstanceTemplate_LabelsEntry { @@ -710,6 +726,67 @@ export interface InstanceTemplate_MetadataEntry { value: string; } +export interface AttachedFilesystemSpec { + $type: "yandex.cloud.compute.v1.instancegroup.AttachedFilesystemSpec"; + /** Mode of access to the filesystem that should be attached. */ + mode: AttachedFilesystemSpec_Mode; + /** + * Name of the device representing the filesystem on the instance. + * + * The name should be used for referencing the filesystem from within the instance + * when it's being mounted, resized etc. + * + * If not specified, a random value will be generated. + */ + deviceName: string; + /** ID of the filesystem that should be attached. */ + filesystemId: string; +} + +export enum AttachedFilesystemSpec_Mode { + MODE_UNSPECIFIED = 0, + /** READ_ONLY - Read-only access. */ + READ_ONLY = 1, + /** READ_WRITE - Read/Write access. Default value. 
*/ + READ_WRITE = 2, + UNRECOGNIZED = -1, +} + +export function attachedFilesystemSpec_ModeFromJSON( + object: any +): AttachedFilesystemSpec_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return AttachedFilesystemSpec_Mode.MODE_UNSPECIFIED; + case 1: + case "READ_ONLY": + return AttachedFilesystemSpec_Mode.READ_ONLY; + case 2: + case "READ_WRITE": + return AttachedFilesystemSpec_Mode.READ_WRITE; + case -1: + case "UNRECOGNIZED": + default: + return AttachedFilesystemSpec_Mode.UNRECOGNIZED; + } +} + +export function attachedFilesystemSpec_ModeToJSON( + object: AttachedFilesystemSpec_Mode +): string { + switch (object) { + case AttachedFilesystemSpec_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case AttachedFilesystemSpec_Mode.READ_ONLY: + return "READ_ONLY"; + case AttachedFilesystemSpec_Mode.READ_WRITE: + return "READ_WRITE"; + default: + return "UNKNOWN"; + } +} + export interface PlacementPolicy { $type: "yandex.cloud.compute.v1.instancegroup.PlacementPolicy"; /** Identifier of placement group */ @@ -804,6 +881,8 @@ export interface AttachedDiskSpec { diskSpec?: AttachedDiskSpec_DiskSpec; /** Set to use an existing disk. To set use variables. */ diskId: string; + /** When set can be later used to change DiskSpec of actual disk. */ + name: string; } export enum AttachedDiskSpec_Mode { @@ -984,13 +1063,15 @@ export function networkSettings_TypeToJSON( export interface LoadBalancerSpec { $type: "yandex.cloud.compute.v1.instancegroup.LoadBalancerSpec"; - /** Specification of the target group that the instance group will be added to. For more information, see [Target groups and resources](/docs/load-balancer/concepts/target-resources). */ + /** Specification of the target group that the instance group will be added to. For more information, see [Target groups and resources](/docs/network-load-balancer/concepts/target-resources). */ targetGroupSpec?: TargetGroupSpec; /** * Timeout for waiting for the VM to be checked by the load balancer. 
If the timeout is exceeded, * the VM will be turned off based on the deployment policy. Specified in seconds. */ maxOpeningTrafficDuration?: Duration; + /** Do not wait load balancer health checks. */ + ignoreHealthChecks: boolean; } export interface TargetGroupSpec { @@ -1018,6 +1099,8 @@ export interface ApplicationLoadBalancerSpec { * the VM will be turned off based on the deployment policy. Specified in seconds. */ maxOpeningTrafficDuration?: Duration; + /** Do not wait load balancer health checks. */ + ignoreHealthChecks: boolean; } export interface ApplicationTargetGroupSpec { @@ -1038,7 +1121,7 @@ export interface ApplicationTargetGroupSpec_LabelsEntry { export interface HealthChecksSpec { $type: "yandex.cloud.compute.v1.instancegroup.HealthChecksSpec"; - /** Health checking specification. For more information, see [Health check](/docs/load-balancer/concepts/health-check). */ + /** Health checking specification. For more information, see [Health check](/docs/network-load-balancer/concepts/health-check). */ healthCheckSpecs: HealthCheckSpec[]; /** * Timeout for waiting for the VM to become healthy. If the timeout is exceeded, @@ -1098,6 +1181,8 @@ export interface ManagedInstance { networkInterfaces: NetworkInterface[]; /** The timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format when the status of the managed instance was last changed. */ statusChangedAt?: Date; + /** Managed instance tag. 
*/ + instanceTag: string; } export enum ManagedInstance_Status { @@ -3378,6 +3463,7 @@ messageTypeRegistry.set(AllocationPolicy.$type, AllocationPolicy); const baseAllocationPolicy_Zone: object = { $type: "yandex.cloud.compute.v1.instancegroup.AllocationPolicy.Zone", zoneId: "", + instanceTagsPool: "", }; export const AllocationPolicy_Zone = { @@ -3390,6 +3476,9 @@ export const AllocationPolicy_Zone = { if (message.zoneId !== "") { writer.uint32(10).string(message.zoneId); } + for (const v of message.instanceTagsPool) { + writer.uint32(18).string(v!); + } return writer; }, @@ -3400,12 +3489,16 @@ export const AllocationPolicy_Zone = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseAllocationPolicy_Zone } as AllocationPolicy_Zone; + message.instanceTagsPool = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: message.zoneId = reader.string(); break; + case 2: + message.instanceTagsPool.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -3420,12 +3513,20 @@ export const AllocationPolicy_Zone = { object.zoneId !== undefined && object.zoneId !== null ? String(object.zoneId) : ""; + message.instanceTagsPool = (object.instanceTagsPool ?? []).map((e: any) => + String(e) + ); return message; }, toJSON(message: AllocationPolicy_Zone): unknown { const obj: any = {}; message.zoneId !== undefined && (obj.zoneId = message.zoneId); + if (message.instanceTagsPool) { + obj.instanceTagsPool = message.instanceTagsPool.map((e) => e); + } else { + obj.instanceTagsPool = []; + } return obj; }, @@ -3434,6 +3535,7 @@ export const AllocationPolicy_Zone = { ): AllocationPolicy_Zone { const message = { ...baseAllocationPolicy_Zone } as AllocationPolicy_Zone; message.zoneId = object.zoneId ?? 
""; + message.instanceTagsPool = object.instanceTagsPool?.map((e) => e) || []; return message; }, }; @@ -3529,6 +3631,9 @@ export const InstanceTemplate = { writer.uint32(114).fork() ).ldelim(); } + for (const v of message.filesystemSpecs) { + AttachedFilesystemSpec.encode(v!, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -3540,6 +3645,7 @@ export const InstanceTemplate = { message.metadata = {}; message.secondaryDiskSpecs = []; message.networkInterfaceSpecs = []; + message.filesystemSpecs = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -3613,6 +3719,11 @@ export const InstanceTemplate = { reader.uint32() ); break; + case 15: + message.filesystemSpecs.push( + AttachedFilesystemSpec.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -3681,6 +3792,9 @@ export const InstanceTemplate = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromJSON(object.placementPolicy) : undefined; + message.filesystemSpecs = (object.filesystemSpecs ?? []).map((e: any) => + AttachedFilesystemSpec.fromJSON(e) + ); return message; }, @@ -3739,6 +3853,13 @@ export const InstanceTemplate = { (obj.placementPolicy = message.placementPolicy ? PlacementPolicy.toJSON(message.placementPolicy) : undefined); + if (message.filesystemSpecs) { + obj.filesystemSpecs = message.filesystemSpecs.map((e) => + e ? AttachedFilesystemSpec.toJSON(e) : undefined + ); + } else { + obj.filesystemSpecs = []; + } return obj; }, @@ -3794,6 +3915,10 @@ export const InstanceTemplate = { object.placementPolicy !== undefined && object.placementPolicy !== null ? 
PlacementPolicy.fromPartial(object.placementPolicy) : undefined; + message.filesystemSpecs = + object.filesystemSpecs?.map((e) => + AttachedFilesystemSpec.fromPartial(e) + ) || []; return message; }, }; @@ -3972,6 +4097,100 @@ messageTypeRegistry.set( InstanceTemplate_MetadataEntry ); +const baseAttachedFilesystemSpec: object = { + $type: "yandex.cloud.compute.v1.instancegroup.AttachedFilesystemSpec", + mode: 0, + deviceName: "", + filesystemId: "", +}; + +export const AttachedFilesystemSpec = { + $type: + "yandex.cloud.compute.v1.instancegroup.AttachedFilesystemSpec" as const, + + encode( + message: AttachedFilesystemSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.deviceName !== "") { + writer.uint32(18).string(message.deviceName); + } + if (message.filesystemId !== "") { + writer.uint32(26).string(message.filesystemId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AttachedFilesystemSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAttachedFilesystemSpec } as AttachedFilesystemSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.deviceName = reader.string(); + break; + case 3: + message.filesystemId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AttachedFilesystemSpec { + const message = { ...baseAttachedFilesystemSpec } as AttachedFilesystemSpec; + message.mode = + object.mode !== undefined && object.mode !== null + ? attachedFilesystemSpec_ModeFromJSON(object.mode) + : 0; + message.deviceName = + object.deviceName !== undefined && object.deviceName !== null + ? 
String(object.deviceName) + : ""; + message.filesystemId = + object.filesystemId !== undefined && object.filesystemId !== null + ? String(object.filesystemId) + : ""; + return message; + }, + + toJSON(message: AttachedFilesystemSpec): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = attachedFilesystemSpec_ModeToJSON(message.mode)); + message.deviceName !== undefined && (obj.deviceName = message.deviceName); + message.filesystemId !== undefined && + (obj.filesystemId = message.filesystemId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AttachedFilesystemSpec { + const message = { ...baseAttachedFilesystemSpec } as AttachedFilesystemSpec; + message.mode = object.mode ?? 0; + message.deviceName = object.deviceName ?? ""; + message.filesystemId = object.filesystemId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AttachedFilesystemSpec.$type, AttachedFilesystemSpec); + const basePlacementPolicy: object = { $type: "yandex.cloud.compute.v1.instancegroup.PlacementPolicy", placementGroupId: "", @@ -4271,6 +4490,7 @@ const baseAttachedDiskSpec: object = { mode: 0, deviceName: "", diskId: "", + name: "", }; export const AttachedDiskSpec = { @@ -4295,6 +4515,9 @@ export const AttachedDiskSpec = { if (message.diskId !== "") { writer.uint32(34).string(message.diskId); } + if (message.name !== "") { + writer.uint32(58).string(message.name); + } return writer; }, @@ -4320,6 +4543,9 @@ export const AttachedDiskSpec = { case 4: message.diskId = reader.string(); break; + case 7: + message.name = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -4346,6 +4572,10 @@ export const AttachedDiskSpec = { object.diskId !== undefined && object.diskId !== null ? String(object.diskId) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; return message; }, @@ -4359,6 +4589,7 @@ export const AttachedDiskSpec = { ? 
AttachedDiskSpec_DiskSpec.toJSON(message.diskSpec) : undefined); message.diskId !== undefined && (obj.diskId = message.diskId); + message.name !== undefined && (obj.name = message.name); return obj; }, @@ -4373,6 +4604,7 @@ export const AttachedDiskSpec = { ? AttachedDiskSpec_DiskSpec.fromPartial(object.diskSpec) : undefined; message.diskId = object.diskId ?? ""; + message.name = object.name ?? ""; return message; }, }; @@ -5101,6 +5333,7 @@ messageTypeRegistry.set(NetworkSettings.$type, NetworkSettings); const baseLoadBalancerSpec: object = { $type: "yandex.cloud.compute.v1.instancegroup.LoadBalancerSpec", + ignoreHealthChecks: false, }; export const LoadBalancerSpec = { @@ -5122,6 +5355,9 @@ export const LoadBalancerSpec = { writer.uint32(18).fork() ).ldelim(); } + if (message.ignoreHealthChecks === true) { + writer.uint32(32).bool(message.ignoreHealthChecks); + } return writer; }, @@ -5144,6 +5380,9 @@ export const LoadBalancerSpec = { reader.uint32() ); break; + case 4: + message.ignoreHealthChecks = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -5163,6 +5402,11 @@ export const LoadBalancerSpec = { object.maxOpeningTrafficDuration !== null ? Duration.fromJSON(object.maxOpeningTrafficDuration) : undefined; + message.ignoreHealthChecks = + object.ignoreHealthChecks !== undefined && + object.ignoreHealthChecks !== null + ? Boolean(object.ignoreHealthChecks) + : false; return message; }, @@ -5176,6 +5420,8 @@ export const LoadBalancerSpec = { (obj.maxOpeningTrafficDuration = message.maxOpeningTrafficDuration ? Duration.toJSON(message.maxOpeningTrafficDuration) : undefined); + message.ignoreHealthChecks !== undefined && + (obj.ignoreHealthChecks = message.ignoreHealthChecks); return obj; }, @@ -5192,6 +5438,7 @@ export const LoadBalancerSpec = { object.maxOpeningTrafficDuration !== null ? Duration.fromPartial(object.maxOpeningTrafficDuration) : undefined; + message.ignoreHealthChecks = object.ignoreHealthChecks ?? 
false; return message; }, }; @@ -5403,6 +5650,7 @@ messageTypeRegistry.set( const baseApplicationLoadBalancerSpec: object = { $type: "yandex.cloud.compute.v1.instancegroup.ApplicationLoadBalancerSpec", + ignoreHealthChecks: false, }; export const ApplicationLoadBalancerSpec = { @@ -5425,6 +5673,9 @@ export const ApplicationLoadBalancerSpec = { writer.uint32(18).fork() ).ldelim(); } + if (message.ignoreHealthChecks === true) { + writer.uint32(24).bool(message.ignoreHealthChecks); + } return writer; }, @@ -5452,6 +5703,9 @@ export const ApplicationLoadBalancerSpec = { reader.uint32() ); break; + case 3: + message.ignoreHealthChecks = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -5473,6 +5727,11 @@ export const ApplicationLoadBalancerSpec = { object.maxOpeningTrafficDuration !== null ? Duration.fromJSON(object.maxOpeningTrafficDuration) : undefined; + message.ignoreHealthChecks = + object.ignoreHealthChecks !== undefined && + object.ignoreHealthChecks !== null + ? Boolean(object.ignoreHealthChecks) + : false; return message; }, @@ -5486,6 +5745,8 @@ export const ApplicationLoadBalancerSpec = { (obj.maxOpeningTrafficDuration = message.maxOpeningTrafficDuration ? Duration.toJSON(message.maxOpeningTrafficDuration) : undefined); + message.ignoreHealthChecks !== undefined && + (obj.ignoreHealthChecks = message.ignoreHealthChecks); return obj; }, @@ -5504,6 +5765,7 @@ export const ApplicationLoadBalancerSpec = { object.maxOpeningTrafficDuration !== null ? Duration.fromPartial(object.maxOpeningTrafficDuration) : undefined; + message.ignoreHealthChecks = object.ignoreHealthChecks ?? 
false; return message; }, }; @@ -6160,6 +6422,7 @@ const baseManagedInstance: object = { name: "", statusMessage: "", zoneId: "", + instanceTag: "", }; export const ManagedInstance = { @@ -6199,6 +6462,9 @@ export const ManagedInstance = { writer.uint32(74).fork() ).ldelim(); } + if (message.instanceTag !== "") { + writer.uint32(114).string(message.instanceTag); + } return writer; }, @@ -6241,6 +6507,9 @@ export const ManagedInstance = { Timestamp.decode(reader, reader.uint32()) ); break; + case 14: + message.instanceTag = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -6284,6 +6553,10 @@ export const ManagedInstance = { object.statusChangedAt !== undefined && object.statusChangedAt !== null ? fromJsonTimestamp(object.statusChangedAt) : undefined; + message.instanceTag = + object.instanceTag !== undefined && object.instanceTag !== null + ? String(object.instanceTag) + : ""; return message; }, @@ -6307,6 +6580,8 @@ export const ManagedInstance = { } message.statusChangedAt !== undefined && (obj.statusChangedAt = message.statusChangedAt.toISOString()); + message.instanceTag !== undefined && + (obj.instanceTag = message.instanceTag); return obj; }, @@ -6325,6 +6600,7 @@ export const ManagedInstance = { object.networkInterfaces?.map((e) => NetworkInterface.fromPartial(e)) || []; message.statusChangedAt = object.statusChangedAt ?? undefined; + message.instanceTag = object.instanceTag ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts index 78804e17..ddb91a31 100644 --- a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts @@ -148,7 +148,7 @@ export interface CreateInstanceGroupRequest { * and attributed to the instance group. 
*/ loadBalancerSpec?: LoadBalancerSpec; - /** Health checking specification. For more information, see [Health check](/docs/load-balancer/concepts/health-check). */ + /** Health checking specification. For more information, see [Health check](/docs/network-load-balancer/concepts/health-check). */ healthChecksSpec?: HealthChecksSpec; /** * ID of the service account. The service account will be used for all API calls @@ -225,7 +225,7 @@ export interface UpdateInstanceGroupRequest { deployPolicy?: DeployPolicy; /** Allocation policy of the instance group by zones and regions. */ allocationPolicy?: AllocationPolicy; - /** Health checking specification. For more information, see [Health check](/docs/load-balancer/concepts/health-check). */ + /** Health checking specification. For more information, see [Health check](/docs/network-load-balancer/concepts/health-check). */ healthChecksSpec?: HealthChecksSpec; /** * ID of the service account. The service account will be used for all API calls @@ -304,6 +304,46 @@ export interface StopInstanceGroupMetadata { instanceGroupId: string; } +export interface RollingRestartRequest { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRestartRequest"; + /** + * ID of the instance group to restart instances in. + * To get the instance group ID, use a [InstanceGroupService.List] request. + */ + instanceGroupId: string; + /** + * IDs of managed instances in the group to restart + * To get instance IDs, use a [InstanceGroupService.ListInstances] request. + */ + managedInstanceIds: string[]; +} + +export interface RollingRestartMetadata { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRestartMetadata"; + /** ID of the InstanceGroup resource that is being rolling restarted. */ + instanceGroupId: string; +} + +export interface RollingRecreateRequest { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRecreateRequest"; + /** + * ID of the instance group to recreate instances in. 
+ * To get the instance group ID, use a [InstanceGroupService.List] request. + */ + instanceGroupId: string; + /** + * IDs of managed instances in the group to recreate + * To get instance IDs, use a [InstanceGroupService.ListInstances] request. + */ + managedInstanceIds: string[]; +} + +export interface RollingRecreateMetadata { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRecreateMetadata"; + /** ID of the InstanceGroup resource that is being rolling recreated. */ + instanceGroupId: string; +} + export interface DeleteInstanceGroupRequest { $type: "yandex.cloud.compute.v1.instancegroup.DeleteInstanceGroupRequest"; /** @@ -2480,6 +2520,313 @@ messageTypeRegistry.set( StopInstanceGroupMetadata ); +const baseRollingRestartRequest: object = { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRestartRequest", + instanceGroupId: "", + managedInstanceIds: "", +}; + +export const RollingRestartRequest = { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRestartRequest" as const, + + encode( + message: RollingRestartRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceGroupId !== "") { + writer.uint32(10).string(message.instanceGroupId); + } + for (const v of message.managedInstanceIds) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollingRestartRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRollingRestartRequest } as RollingRestartRequest; + message.managedInstanceIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceGroupId = reader.string(); + break; + case 2: + message.managedInstanceIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollingRestartRequest { + const message = { ...baseRollingRestartRequest } as RollingRestartRequest; + message.instanceGroupId = + object.instanceGroupId !== undefined && object.instanceGroupId !== null + ? String(object.instanceGroupId) + : ""; + message.managedInstanceIds = (object.managedInstanceIds ?? []).map( + (e: any) => String(e) + ); + return message; + }, + + toJSON(message: RollingRestartRequest): unknown { + const obj: any = {}; + message.instanceGroupId !== undefined && + (obj.instanceGroupId = message.instanceGroupId); + if (message.managedInstanceIds) { + obj.managedInstanceIds = message.managedInstanceIds.map((e) => e); + } else { + obj.managedInstanceIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RollingRestartRequest { + const message = { ...baseRollingRestartRequest } as RollingRestartRequest; + message.instanceGroupId = object.instanceGroupId ?? 
""; + message.managedInstanceIds = object.managedInstanceIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(RollingRestartRequest.$type, RollingRestartRequest); + +const baseRollingRestartMetadata: object = { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRestartMetadata", + instanceGroupId: "", +}; + +export const RollingRestartMetadata = { + $type: + "yandex.cloud.compute.v1.instancegroup.RollingRestartMetadata" as const, + + encode( + message: RollingRestartMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceGroupId !== "") { + writer.uint32(10).string(message.instanceGroupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollingRestartMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRollingRestartMetadata } as RollingRestartMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceGroupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollingRestartMetadata { + const message = { ...baseRollingRestartMetadata } as RollingRestartMetadata; + message.instanceGroupId = + object.instanceGroupId !== undefined && object.instanceGroupId !== null + ? String(object.instanceGroupId) + : ""; + return message; + }, + + toJSON(message: RollingRestartMetadata): unknown { + const obj: any = {}; + message.instanceGroupId !== undefined && + (obj.instanceGroupId = message.instanceGroupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RollingRestartMetadata { + const message = { ...baseRollingRestartMetadata } as RollingRestartMetadata; + message.instanceGroupId = object.instanceGroupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RollingRestartMetadata.$type, RollingRestartMetadata); + +const baseRollingRecreateRequest: object = { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRecreateRequest", + instanceGroupId: "", + managedInstanceIds: "", +}; + +export const RollingRecreateRequest = { + $type: + "yandex.cloud.compute.v1.instancegroup.RollingRecreateRequest" as const, + + encode( + message: RollingRecreateRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceGroupId !== "") { + writer.uint32(10).string(message.instanceGroupId); + } + for (const v of message.managedInstanceIds) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollingRecreateRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRollingRecreateRequest } as RollingRecreateRequest; + message.managedInstanceIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceGroupId = reader.string(); + break; + case 2: + message.managedInstanceIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollingRecreateRequest { + const message = { ...baseRollingRecreateRequest } as RollingRecreateRequest; + message.instanceGroupId = + object.instanceGroupId !== undefined && object.instanceGroupId !== null + ? String(object.instanceGroupId) + : ""; + message.managedInstanceIds = (object.managedInstanceIds ?? 
[]).map( + (e: any) => String(e) + ); + return message; + }, + + toJSON(message: RollingRecreateRequest): unknown { + const obj: any = {}; + message.instanceGroupId !== undefined && + (obj.instanceGroupId = message.instanceGroupId); + if (message.managedInstanceIds) { + obj.managedInstanceIds = message.managedInstanceIds.map((e) => e); + } else { + obj.managedInstanceIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RollingRecreateRequest { + const message = { ...baseRollingRecreateRequest } as RollingRecreateRequest; + message.instanceGroupId = object.instanceGroupId ?? ""; + message.managedInstanceIds = object.managedInstanceIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(RollingRecreateRequest.$type, RollingRecreateRequest); + +const baseRollingRecreateMetadata: object = { + $type: "yandex.cloud.compute.v1.instancegroup.RollingRecreateMetadata", + instanceGroupId: "", +}; + +export const RollingRecreateMetadata = { + $type: + "yandex.cloud.compute.v1.instancegroup.RollingRecreateMetadata" as const, + + encode( + message: RollingRecreateMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceGroupId !== "") { + writer.uint32(10).string(message.instanceGroupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollingRecreateMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRollingRecreateMetadata, + } as RollingRecreateMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceGroupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollingRecreateMetadata { + const message = { + ...baseRollingRecreateMetadata, + } as RollingRecreateMetadata; + message.instanceGroupId = + object.instanceGroupId !== undefined && object.instanceGroupId !== null + ? String(object.instanceGroupId) + : ""; + return message; + }, + + toJSON(message: RollingRecreateMetadata): unknown { + const obj: any = {}; + message.instanceGroupId !== undefined && + (obj.instanceGroupId = message.instanceGroupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RollingRecreateMetadata { + const message = { + ...baseRollingRecreateMetadata, + } as RollingRecreateMetadata; + message.instanceGroupId = object.instanceGroupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RollingRecreateMetadata.$type, RollingRecreateMetadata); + const baseDeleteInstanceGroupRequest: object = { $type: "yandex.cloud.compute.v1.instancegroup.DeleteInstanceGroupRequest", instanceGroupId: "", @@ -3929,6 +4276,36 @@ export const InstanceGroupServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** + * Performs rolling restart of specified instances for the specified instance group. + * Rolling restart does restart of instances respecting all group policies. 
+ */ + rollingRestart: { + path: "/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/RollingRestart", + requestStream: false, + responseStream: false, + requestSerialize: (value: RollingRestartRequest) => + Buffer.from(RollingRestartRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RollingRestartRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Performs rolling recreate of specified instances for the specified instance group. + * Rolling recreate does recreate of instance VMs respecting all group policies. + */ + rollingRecreate: { + path: "/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/RollingRecreate", + requestStream: false, + responseStream: false, + requestSerialize: (value: RollingRecreateRequest) => + Buffer.from(RollingRecreateRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RollingRecreateRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Starts the specified instance group. */ start: { path: "/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Start", @@ -4133,6 +4510,16 @@ export interface InstanceGroupServiceServer >; /** Stops the specified instance group. */ stop: handleUnaryCall; + /** + * Performs rolling restart of specified instances for the specified instance group. + * Rolling restart does restart of instances respecting all group policies. + */ + rollingRestart: handleUnaryCall; + /** + * Performs rolling recreate of specified instances for the specified instance group. + * Rolling recreate does recreate of instance VMs respecting all group policies. + */ + rollingRecreate: handleUnaryCall; /** Starts the specified instance group. 
*/ start: handleUnaryCall; /** Deletes the specified instance group. */ @@ -4321,6 +4708,44 @@ export interface InstanceGroupServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** + * Performs rolling restart of specified instances for the specified instance group. + * Rolling restart does restart of instances respecting all group policies. + */ + rollingRestart( + request: RollingRestartRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollingRestart( + request: RollingRestartRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollingRestart( + request: RollingRestartRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Performs rolling recreate of specified instances for the specified instance group. + * Rolling recreate does recreate of instance VMs respecting all group policies. + */ + rollingRecreate( + request: RollingRecreateRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollingRecreate( + request: RollingRecreateRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollingRecreate( + request: RollingRecreateRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Starts the specified instance group. 
*/ start( request: StartInstanceGroupRequest, diff --git a/src/generated/yandex/cloud/compute/v1/placement_group.ts b/src/generated/yandex/cloud/compute/v1/placement_group.ts index 8ac2edc8..320c6046 100644 --- a/src/generated/yandex/cloud/compute/v1/placement_group.ts +++ b/src/generated/yandex/cloud/compute/v1/placement_group.ts @@ -28,6 +28,7 @@ export interface PlacementGroup { * over distinct failure domains. */ spreadPlacementStrategy?: SpreadPlacementStrategy | undefined; + partitionPlacementStrategy?: PartitionPlacementStrategy | undefined; } export interface PlacementGroup_LabelsEntry { @@ -44,6 +45,11 @@ export interface SpreadPlacementStrategy { $type: "yandex.cloud.compute.v1.SpreadPlacementStrategy"; } +export interface PartitionPlacementStrategy { + $type: "yandex.cloud.compute.v1.PartitionPlacementStrategy"; + partitions: number; +} + const basePlacementGroup: object = { $type: "yandex.cloud.compute.v1.PlacementGroup", id: "", @@ -93,6 +99,12 @@ export const PlacementGroup = { writer.uint32(58).fork() ).ldelim(); } + if (message.partitionPlacementStrategy !== undefined) { + PartitionPlacementStrategy.encode( + message.partitionPlacementStrategy, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -136,6 +148,10 @@ export const PlacementGroup = { reader.uint32() ); break; + case 8: + message.partitionPlacementStrategy = + PartitionPlacementStrategy.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -175,6 +191,11 @@ export const PlacementGroup = { object.spreadPlacementStrategy !== null ? SpreadPlacementStrategy.fromJSON(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? 
PartitionPlacementStrategy.fromJSON(object.partitionPlacementStrategy) + : undefined; return message; }, @@ -197,6 +218,10 @@ export const PlacementGroup = { (obj.spreadPlacementStrategy = message.spreadPlacementStrategy ? SpreadPlacementStrategy.toJSON(message.spreadPlacementStrategy) : undefined); + message.partitionPlacementStrategy !== undefined && + (obj.partitionPlacementStrategy = message.partitionPlacementStrategy + ? PartitionPlacementStrategy.toJSON(message.partitionPlacementStrategy) + : undefined); return obj; }, @@ -222,6 +247,13 @@ export const PlacementGroup = { object.spreadPlacementStrategy !== null ? SpreadPlacementStrategy.fromPartial(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? PartitionPlacementStrategy.fromPartial( + object.partitionPlacementStrategy + ) + : undefined; return message; }, }; @@ -371,6 +403,92 @@ export const SpreadPlacementStrategy = { messageTypeRegistry.set(SpreadPlacementStrategy.$type, SpreadPlacementStrategy); +const basePartitionPlacementStrategy: object = { + $type: "yandex.cloud.compute.v1.PartitionPlacementStrategy", + partitions: 0, +}; + +export const PartitionPlacementStrategy = { + $type: "yandex.cloud.compute.v1.PartitionPlacementStrategy" as const, + + encode( + message: PartitionPlacementStrategy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.partitions !== 0) { + writer.uint32(8).int64(message.partitions); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PartitionPlacementStrategy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePartitionPlacementStrategy, + } as PartitionPlacementStrategy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.partitions = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PartitionPlacementStrategy { + const message = { + ...basePartitionPlacementStrategy, + } as PartitionPlacementStrategy; + message.partitions = + object.partitions !== undefined && object.partitions !== null + ? Number(object.partitions) + : 0; + return message; + }, + + toJSON(message: PartitionPlacementStrategy): unknown { + const obj: any = {}; + message.partitions !== undefined && + (obj.partitions = Math.round(message.partitions)); + return obj; + }, + + fromPartial, I>>( + object: I + ): PartitionPlacementStrategy { + const message = { + ...basePartitionPlacementStrategy, + } as PartitionPlacementStrategy; + message.partitions = object.partitions ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + PartitionPlacementStrategy.$type, + PartitionPlacementStrategy +); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -420,6 +538,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/compute/v1/placement_group_service.ts b/src/generated/yandex/cloud/compute/v1/placement_group_service.ts index 2e117165..6b6ab38b 100644 --- a/src/generated/yandex/cloud/compute/v1/placement_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/placement_group_service.ts @@ -18,6 +18,7 @@ import { FieldMask } from "../../../../google/protobuf/field_mask"; import { PlacementGroup, SpreadPlacementStrategy, + PartitionPlacementStrategy, } from "../../../../yandex/cloud/compute/v1/placement_group"; import { Instance } from "../../../../yandex/cloud/compute/v1/instance"; import { Operation } from "../../../../yandex/cloud/operation/operation"; @@ -57,9 +58,21 @@ export interface ListPlacementGroupsRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * Currently you can use filtering only on the [PlacementGroup.name] field. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. 
`` is the field name. Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListPlacementGroupsResponse { @@ -92,6 +105,7 @@ export interface CreatePlacementGroupRequest { labels: { [key: string]: string }; /** Anti-affinity placement strategy (`spread`). Instances are distributed over distinct failure domains. */ spreadPlacementStrategy?: SpreadPlacementStrategy | undefined; + partitionPlacementStrategy?: PartitionPlacementStrategy | undefined; } export interface CreatePlacementGroupRequest_LabelsEntry { @@ -309,6 +323,7 @@ const baseListPlacementGroupsRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListPlacementGroupsRequest = { @@ -330,6 +345,9 @@ export const ListPlacementGroupsRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -357,6 +375,9 @@ export const ListPlacementGroupsRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -385,6 +406,10 @@ export const ListPlacementGroupsRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? 
String(object.orderBy) + : ""; return message; }, @@ -395,6 +420,7 @@ export const ListPlacementGroupsRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -408,6 +434,7 @@ export const ListPlacementGroupsRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; return message; }, }; @@ -553,6 +580,12 @@ export const CreatePlacementGroupRequest = { writer.uint32(42).fork() ).ldelim(); } + if (message.partitionPlacementStrategy !== undefined) { + PartitionPlacementStrategy.encode( + message.partitionPlacementStrategy, + writer.uint32(50).fork() + ).ldelim(); + } return writer; }, @@ -593,6 +626,10 @@ export const CreatePlacementGroupRequest = { reader.uint32() ); break; + case 6: + message.partitionPlacementStrategy = + PartitionPlacementStrategy.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -628,6 +665,11 @@ export const CreatePlacementGroupRequest = { object.spreadPlacementStrategy !== null ? SpreadPlacementStrategy.fromJSON(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? PartitionPlacementStrategy.fromJSON(object.partitionPlacementStrategy) + : undefined; return message; }, @@ -647,6 +689,10 @@ export const CreatePlacementGroupRequest = { (obj.spreadPlacementStrategy = message.spreadPlacementStrategy ? SpreadPlacementStrategy.toJSON(message.spreadPlacementStrategy) : undefined); + message.partitionPlacementStrategy !== undefined && + (obj.partitionPlacementStrategy = message.partitionPlacementStrategy + ? 
PartitionPlacementStrategy.toJSON(message.partitionPlacementStrategy) + : undefined); return obj; }, @@ -672,6 +718,13 @@ export const CreatePlacementGroupRequest = { object.spreadPlacementStrategy !== null ? SpreadPlacementStrategy.fromPartial(object.spreadPlacementStrategy) : undefined; + message.partitionPlacementStrategy = + object.partitionPlacementStrategy !== undefined && + object.partitionPlacementStrategy !== null + ? PartitionPlacementStrategy.fromPartial( + object.partitionPlacementStrategy + ) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts b/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts index 0fdc0d79..4027436a 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts @@ -7,37 +7,58 @@ import { Duration } from "../../../../google/protobuf/duration"; export const protobufPackage = "yandex.cloud.compute.v1"; +/** A snapshot schedule. For details about the concept, see [documentation](/docs/compute/concepts/snapshot-schedule). */ export interface SnapshotSchedule { $type: "yandex.cloud.compute.v1.SnapshotSchedule"; - /** ID of the snapshot schedule policy. */ + /** ID of the snapshot schedule. */ id: string; - /** ID of the folder that the scheduler policy belongs to. */ + /** ID of the folder that the snapshot schedule belongs to. */ folderId: string; + /** Creation timestamp. */ createdAt?: Date; /** - * Name of the schedule policy. + * Name of the snapshot schedule. + * * The name is unique within the folder. */ name: string; - /** Description of the schedule policy. */ + /** Description of the snapshot schedule. */ description: string; - /** Resource labels as `key:value` pairs. */ + /** Snapshot schedule labels as `key:value` pairs. */ labels: { [key: string]: string }; + /** Status of the snapshot schedule. 
*/ status: SnapshotSchedule_Status; - /** schedule properties */ + /** Frequency settings of the snapshot schedule. */ schedulePolicy?: SchedulePolicy; + /** + * Retention period of the snapshot schedule. Once a snapshot created by the schedule reaches this age, it is + * automatically deleted. + */ retentionPeriod?: Duration | undefined; + /** + * Retention count of the snapshot schedule. Once the number of snapshots created by the schedule exceeds this + * number, the oldest ones are automatically deleted. E.g. if the number is 5, the first snapshot is deleted + * after the sixth one is created, the second is deleted after the seventh one is created, and so on. + */ snapshotCount: number | undefined; - /** properties to create snapshot with. */ + /** Attributes of snapshots created by the snapshot schedule. */ snapshotSpec?: SnapshotSpec; } export enum SnapshotSchedule_Status { STATUS_UNSPECIFIED = 0, + /** CREATING - The snapshot schedule is being created. */ CREATING = 1, + /** + * ACTIVE - The snapshot schedule is on: new disk snapshots will be created, old ones deleted + * (if [SnapshotSchedule.retention_policy] is specified). + */ ACTIVE = 2, + /** INACTIVE - The schedule is interrupted, snapshots won't be created or deleted. */ INACTIVE = 3, + /** DELETING - The schedule is being deleted. */ DELETING = 4, + /** UPDATING - Changes are being made to snapshot schedule settings or a list of attached disks. */ UPDATING = 5, UNRECOGNIZED = -1, } @@ -98,20 +119,27 @@ export interface SnapshotSchedule_LabelsEntry { value: string; } +/** A resource for frequency settings of a snapshot schedule. */ export interface SchedulePolicy { $type: "yandex.cloud.compute.v1.SchedulePolicy"; - /** start time for the first run. */ + /** Timestamp for creating the first snapshot. */ startAt?: Date; - /** cron format (* * * * *) */ + /** + * Cron expression for the snapshot schedule (UTC+0). 
+ * + * The expression must consist of five fields (`Minutes Hours Day-of-month Month Day-of-week`) or be one of + * nonstandard predefined expressions (e.g. `@hourly`). For details about the format, + * see [documentation](/docs/compute/concepts/snapshot-schedule#cron) + */ expression: string; } -/** Properties of created snapshot backup */ +/** A resource for attributes of snapshots created by the snapshot schedule. */ export interface SnapshotSpec { $type: "yandex.cloud.compute.v1.SnapshotSpec"; /** Description of the created snapshot. */ description: string; - /** Resource labels as `key:value` pairs. */ + /** Snapshot labels as `key:value` pairs. */ labels: { [key: string]: string }; } diff --git a/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts b/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts index f62f353d..dc61591f 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts @@ -29,61 +29,109 @@ export const protobufPackage = "yandex.cloud.compute.v1"; export interface GetSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.GetSnapshotScheduleRequest"; - /** ID of the SnapshotSchedule resource to return. */ + /** + * ID of the snapshot schedule to return. + * + * To get a schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; } export interface ListSnapshotSchedulesRequest { $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesRequest"; - /** ID of the folder to list snapshot schedules in. */ + /** + * ID of the folder to list snapshot schedules in. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; /** * The maximum number of results per page to return. 
If the number of available - * results is larger than [page_size], - * the service returns a [ListSnapshotSchedulesResponse.next_page_token] + * results is larger than `page_size`, the service returns a [ListSnapshotSchedulesResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the + * Page token. To get the next page of results, set `page_token` to the * [ListSnapshotSchedulesResponse.next_page_token] returned by a previous list request. */ pageToken: string; + /** + * A filter expression that filters snapshot schedules listed in the response. + * + * The expression must specify: + * 1. The field name. Currently you can use filtering only on [SnapshotSchedule.name] field. + * 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `name=my-schedule`. + */ filter: string; /** - * By which column the listing should be ordered and in which direction, - * format is "createdAt desc". "id asc" if omitted. + * A sorting expression that sorts snapshot schedules listed in the response. + * + * The expression must specify the field name from [SnapshotSchedule] and `asc`ending or `desc`ending order, + * e.g. `createdAt desc`. + * + * Default value: `id asc`. */ orderBy: string; } export interface ListSnapshotSchedulesResponse { $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesResponse"; - /** List of SnapshotSchedule resources. */ + /** List of snapshot schedules in the specified folder. */ snapshotSchedules: SnapshotSchedule[]; /** - * This token allows you to get the next page of results for list requests. 
If the number of results - * is larger than [ListSnapshotSchedulesRequest.page_size], use - * the [next_page_token] as the value - * for the [ListSnapshotSchedulesRequest.page_token] query parameter - * in the next list request. Each subsequent list request will have its own - * [next_page_token] to continue paging through the results. + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListSnapshotSchedulesRequest.page_size], use `next_page_token` as the value + * for the [ListSnapshotSchedulesRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. */ nextPageToken: string; } export interface CreateSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest"; - /** ID of the folder to create a snapshot schedule in. */ + /** + * ID of the folder to create a snapshot schedule in. + * + * Snapshots are created in the same folder as the schedule, even if disks from other folders are attached + * to the schedule. + * + * To get a folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; + /** + * Name of the snapshot schedule. + * + * The name must be unique within the folder. + */ name: string; + /** Description of the snapshot schedule. */ description: string; + /** Snapshot schedule labels as `key:value` pairs. */ labels: { [key: string]: string }; - /** schedule properties */ + /** Frequency settings of the snapshot schedule. */ schedulePolicy?: SchedulePolicy; + /** + * Retention period of the snapshot schedule. Once a snapshot created by the schedule reaches this age, it is + * automatically deleted. + */ retentionPeriod?: Duration | undefined; + /** + * Retention count of the snapshot schedule. Once the number of snapshots created by the schedule exceeds this + * number, the oldest ones are automatically deleted. E.g. 
if the number is 5, the first snapshot is deleted + * after the sixth one is created, the second is deleted after the seventh one is created, and so on. + */ snapshotCount: number | undefined; + /** Attributes of snapshots created by the snapshot schedule. */ snapshotSpec?: SnapshotSpec; + /** + * List of IDs of the disks attached to the snapshot schedule. + * + * To get a disk ID, make a [yandex.cloud.compute.v1.DiskService.List] request. + */ diskIds: string[]; } @@ -95,22 +143,52 @@ export interface CreateSnapshotScheduleRequest_LabelsEntry { export interface CreateSnapshotScheduleMetadata { $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleMetadata"; + /** ID of the snapshot schedule that is being created. */ snapshotScheduleId: string; } export interface UpdateSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest"; - /** ID of the SnapshotSchedule resource to update. */ + /** + * ID of the snapshot schedule to update. + * + * To get the snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; - /** Field mask that specifies which fields of the SnapshotSchedule resource are going to be updated. */ + /** Field mask that specifies which attributes of the snapshot schedule should be updated. */ updateMask?: FieldMask; - /** schedule properties */ + /** + * New name for the snapshot schedule. + * + * The name must be unique within the folder. + */ name: string; + /** New description of the snapshot schedule. */ description: string; + /** + * Snapshot schedule labels as `key:value` pairs. + * + * Existing set of labels is completely replaced by the provided set, so if you just want + * to add or remove a label: + * 1. Get the current set of labels with a [SnapshotScheduleService.Get] request. + * 2. Add or remove a label in this set. + * 3. Send the new set in this field. + */ labels: { [key: string]: string }; + /** New frequency settings of the snapshot schedule. 
*/ schedulePolicy?: SchedulePolicy; + /** + * Retention period of the snapshot schedule. Once a snapshot created by the schedule reaches this age, it is + * automatically deleted. + */ retentionPeriod?: Duration | undefined; + /** + * Retention count of the snapshot schedule. Once the number of snapshots created by the schedule exceeds this + * number, the oldest ones are automatically deleted. E.g. if the number is 5, the first snapshot is deleted + * after the sixth one is created, the second is deleted after the seventh one is created, and so on. + */ snapshotCount: number | undefined; + /** New attributes of snapshots created by the snapshot schedule. */ snapshotSpec?: SnapshotSpec; } @@ -122,50 +200,72 @@ export interface UpdateSnapshotScheduleRequest_LabelsEntry { export interface UpdateSnapshotScheduleMetadata { $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleMetadata"; + /** ID of the snapshot schedule that is being updated. */ snapshotScheduleId: string; } export interface DeleteSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleRequest"; - /** ID of the snapshot schedule to delete. */ + /** + * ID of the snapshot schedule to delete. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; } export interface DeleteSnapshotScheduleMetadata { $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleMetadata"; + /** ID of the snapshot schedule that is being deleted. */ snapshotScheduleId: string; } export interface DisableSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleRequest"; - /** ID of the snapshot schedule to disable. */ + /** + * ID of the snapshot schedule to disable. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. 
+ */ snapshotScheduleId: string; } export interface DisableSnapshotScheduleMetadata { $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleMetadata"; + /** ID of the snapshot schedule that is being disabled. */ snapshotScheduleId: string; } export interface EnableSnapshotScheduleRequest { $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleRequest"; - /** ID of the snapshot schedule to enable. */ + /** + * ID of the snapshot schedule to enable. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; } export interface EnableSnapshotScheduleMetadata { $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleMetadata"; + /** ID of the snapshot schedule that is being enabled. */ snapshotScheduleId: string; } export interface ListSnapshotScheduleOperationsRequest { $type: "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsRequest"; - /** ID of the SnapshotSchedule resource to list operations for. */ + /** + * ID of the snapshot schedule to list operations for. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; /** * The maximum number of results per page to return. If the number of available * results is larger than [page_size], the service returns a [ListSnapshotScheduleOperationsResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. */ pageSize: number; /** @@ -180,52 +280,66 @@ export interface ListSnapshotScheduleOperationsResponse { /** List of operations for the specified snapshot schedule. */ operations: Operation[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListSnapshotScheduleOperationsRequest.page_size], use the [next_page_token] as the value - * for the [ListSnapshotScheduleOperationsRequest.page_token] query parameter in the next list request. 
- * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListSnapshotScheduleOperationsRequest.page_size], use `next_page_token` as the value + * for the [ListSnapshotScheduleOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. */ nextPageToken: string; } export interface ListSnapshotScheduleSnapshotsRequest { $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsRequest"; - /** ID of the SnapshotSchedule resource to list snapshots for. */ + /** + * ID of the snapshot schedule to list created snapshots for. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; /** * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListSnapshotScheduleSnapshotsResponse.next_page_token] + * results is larger than [page_size], the service returns a [ListSnapshotScheduleOperationsResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListSnapshotScheduleSnapshotsResponse.next_page_token] returned by a previous list request. + * [ListSnapshotScheduleOperationsResponse.next_page_token] returned by a previous list request. */ pageToken: string; } export interface ListSnapshotScheduleSnapshotsResponse { $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsResponse"; - /** List of snapshots for the specified snapshot schedule. */ + /** List of snapshots created by the specified snapshot schedule. 
*/ snapshots: Snapshot[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListSnapshotScheduleSnapshotsRequest.page_size], use the [next_page_token] as the value - * for the [ListSnapshotScheduleSnapshotsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListSnapshotScheduleSnapshotsRequest.page_size], use `next_page_token` as the value + * for the [ListSnapshotScheduleSnapshotsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. */ nextPageToken: string; } export interface ListSnapshotScheduleDisksRequest { $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksRequest"; - /** ID of the SnapshotSchedule resource to list disks for. */ + /** + * ID of the snapshot schedule to list attached disks for. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; /** * The maximum number of results per page to return. If the number of available * results is larger than [page_size], the service returns a [ListSnapshotScheduleDisksResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. */ pageSize: number; /** @@ -237,29 +351,43 @@ export interface ListSnapshotScheduleDisksRequest { export interface ListSnapshotScheduleDisksResponse { $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksResponse"; - /** List of disks for the specified snapshot schedule. */ + /** List of disks attached to the specified snapshot schedule. */ disks: Disk[]; /** - * This token allows you to get the next page of results for list requests. 
If the number of results - * is larger than [ListSnapshotScheduleDisksRequest.page_size], use the [next_page_token] as the value - * for the [ListSnapshotScheduleDisksRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListSnapshotScheduleDisksRequest.page_size], use `next_page_token` as the value + * for the [ListSnapshotScheduleDisksRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. */ nextPageToken: string; } export interface UpdateSnapshotScheduleDisksRequest { $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksRequest"; - /** ID of the snapshot schedule to update. */ + /** + * ID of the snapshot schedule to update. + * + * To get a snapshot schedule ID, make a [SnapshotScheduleService.List] request. + */ snapshotScheduleId: string; - /** List of disk ids to remove from the specified schedule. */ + /** + * List of IDs of the disks to detach from the specified schedule. + * + * To get an ID of a disk attached to the schedule, make a [SnapshotScheduleService.ListDisks] request. + */ remove: string[]; - /** List of disk ids to add to the specified schedule */ + /** + * List of IDs of the disks to attach to the specified schedule. + * + * To get a disk ID, make a [yandex.cloud.compute.v1.DiskService.List] request. + */ add: string[]; } export interface UpdateSnapshotScheduleDisksMetadata { $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksMetadata"; + /** ID of the snapshot schedule that is being updated. */ snapshotScheduleId: string; } @@ -2592,12 +2720,12 @@ messageTypeRegistry.set( UpdateSnapshotScheduleDisksMetadata ); -/** A set of methods for managing SnapshotSchedule resources. 
*/ +/** A set of methods for managing snapshot schedules. */ export const SnapshotScheduleServiceService = { /** - * Returns the specified SnapshotSchedule resource. + * Returns the specified snapshot schedule. * - * To get the list of available SnapshotSchedule resources, make a [List] request. + * To get the list of available snapshot schedules, make a [List] request. */ get: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Get", @@ -2611,7 +2739,7 @@ export const SnapshotScheduleServiceService = { Buffer.from(SnapshotSchedule.encode(value).finish()), responseDeserialize: (value: Buffer) => SnapshotSchedule.decode(value), }, - /** Retrieves the list of SnapshotSchedule resources in the specified folder. */ + /** Retrieves the list of snapshot schedules in the specified folder. */ list: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/List", requestStream: false, @@ -2638,7 +2766,11 @@ export const SnapshotScheduleServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates the specified snapshot schedule. */ + /** + * Updates the specified snapshot schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ update: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Update", requestStream: false, @@ -2654,8 +2786,10 @@ export const SnapshotScheduleServiceService = { /** * Deletes the specified snapshot schedule. * - * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule does not delete - * any snapshots previously made by the schedule. You must delete snapshots separately. + * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule + * does not delete any snapshots created by the schedule. You must delete snapshots separately. 
+ * + * The schedule is deleted only after all snapshot creations and deletions triggered by the schedule are completed. */ delete: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Delete", @@ -2669,7 +2803,11 @@ export const SnapshotScheduleServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** UpdateDisks of schedule */ + /** + * Updates the list of disks attached to the specified schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ updateDisks: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/UpdateDisks", requestStream: false, @@ -2683,9 +2821,12 @@ export const SnapshotScheduleServiceService = { responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** - * Disable schedule sets status InActive. + * Disables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `INACTIVE`: the schedule is interrupted, snapshots won't be created + * or deleted. * - * When schedule os disabled snapshots will not be created or deleted according to retention policy. + * The schedule is disabled only after all snapshot creations and deletions triggered by the schedule are completed. */ disable: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Disable", @@ -2699,7 +2840,12 @@ export const SnapshotScheduleServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Enable schedule sets status Active. */ + /** + * Enables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `ACTIVE`: new disk snapshots will be created, old ones deleted + * (if [SnapshotSchedule.retention_policy] is specified). 
+ */ enable: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Enable", requestStream: false, @@ -2728,7 +2874,7 @@ export const SnapshotScheduleServiceService = { responseDeserialize: (value: Buffer) => ListSnapshotScheduleOperationsResponse.decode(value), }, - /** List snapshot created by schedule. */ + /** Retrieves the list of snapshots created by the specified snapshot schedule. */ listSnapshots: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/ListSnapshots", requestStream: false, @@ -2742,7 +2888,7 @@ export const SnapshotScheduleServiceService = { responseDeserialize: (value: Buffer) => ListSnapshotScheduleSnapshotsResponse.decode(value), }, - /** List disks that belong to schedule. */ + /** Retrieves the list of disks attached to the specified snapshot schedule. */ listDisks: { path: "/yandex.cloud.compute.v1.SnapshotScheduleService/ListDisks", requestStream: false, @@ -2761,48 +2907,66 @@ export const SnapshotScheduleServiceService = { export interface SnapshotScheduleServiceServer extends UntypedServiceImplementation { /** - * Returns the specified SnapshotSchedule resource. + * Returns the specified snapshot schedule. * - * To get the list of available SnapshotSchedule resources, make a [List] request. + * To get the list of available snapshot schedules, make a [List] request. */ get: handleUnaryCall; - /** Retrieves the list of SnapshotSchedule resources in the specified folder. */ + /** Retrieves the list of snapshot schedules in the specified folder. */ list: handleUnaryCall< ListSnapshotSchedulesRequest, ListSnapshotSchedulesResponse >; /** Creates a snapshot schedule in the specified folder. */ create: handleUnaryCall; - /** Updates the specified snapshot schedule. */ + /** + * Updates the specified snapshot schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ update: handleUnaryCall; /** * Deletes the specified snapshot schedule. 
* - * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule does not delete - * any snapshots previously made by the schedule. You must delete snapshots separately. + * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule + * does not delete any snapshots created by the schedule. You must delete snapshots separately. + * + * The schedule is deleted only after all snapshot creations and deletions triggered by the schedule are completed. */ delete: handleUnaryCall; - /** UpdateDisks of schedule */ + /** + * Updates the list of disks attached to the specified schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ updateDisks: handleUnaryCall; /** - * Disable schedule sets status InActive. + * Disables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `INACTIVE`: the schedule is interrupted, snapshots won't be created + * or deleted. * - * When schedule os disabled snapshots will not be created or deleted according to retention policy. + * The schedule is disabled only after all snapshot creations and deletions triggered by the schedule are completed. */ disable: handleUnaryCall; - /** Enable schedule sets status Active. */ + /** + * Enables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `ACTIVE`: new disk snapshots will be created, old ones deleted + * (if [SnapshotSchedule.retention_policy] is specified). + */ enable: handleUnaryCall; /** Lists operations for the specified snapshot schedule. */ listOperations: handleUnaryCall< ListSnapshotScheduleOperationsRequest, ListSnapshotScheduleOperationsResponse >; - /** List snapshot created by schedule. */ + /** Retrieves the list of snapshots created by the specified snapshot schedule. 
*/ listSnapshots: handleUnaryCall< ListSnapshotScheduleSnapshotsRequest, ListSnapshotScheduleSnapshotsResponse >; - /** List disks that belong to schedule. */ + /** Retrieves the list of disks attached to the specified snapshot schedule. */ listDisks: handleUnaryCall< ListSnapshotScheduleDisksRequest, ListSnapshotScheduleDisksResponse @@ -2811,9 +2975,9 @@ export interface SnapshotScheduleServiceServer export interface SnapshotScheduleServiceClient extends Client { /** - * Returns the specified SnapshotSchedule resource. + * Returns the specified snapshot schedule. * - * To get the list of available SnapshotSchedule resources, make a [List] request. + * To get the list of available snapshot schedules, make a [List] request. */ get( request: GetSnapshotScheduleRequest, @@ -2830,7 +2994,7 @@ export interface SnapshotScheduleServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: SnapshotSchedule) => void ): ClientUnaryCall; - /** Retrieves the list of SnapshotSchedule resources in the specified folder. */ + /** Retrieves the list of snapshot schedules in the specified folder. */ list( request: ListSnapshotSchedulesRequest, callback: ( @@ -2871,7 +3035,11 @@ export interface SnapshotScheduleServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates the specified snapshot schedule. */ + /** + * Updates the specified snapshot schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ update( request: UpdateSnapshotScheduleRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2890,8 +3058,10 @@ export interface SnapshotScheduleServiceClient extends Client { /** * Deletes the specified snapshot schedule. * - * Deleting a snapshot schedule removes its data permanently and is irreversible. 
However, deleting a schedule does not delete - * any snapshots previously made by the schedule. You must delete snapshots separately. + * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule + * does not delete any snapshots created by the schedule. You must delete snapshots separately. + * + * The schedule is deleted only after all snapshot creations and deletions triggered by the schedule are completed. */ delete( request: DeleteSnapshotScheduleRequest, @@ -2908,7 +3078,11 @@ export interface SnapshotScheduleServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** UpdateDisks of schedule */ + /** + * Updates the list of disks attached to the specified schedule. + * + * The schedule is updated only after all snapshot creations and deletions triggered by the schedule are completed. + */ updateDisks( request: UpdateSnapshotScheduleDisksRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2925,9 +3099,12 @@ export interface SnapshotScheduleServiceClient extends Client { callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; /** - * Disable schedule sets status InActive. + * Disables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `INACTIVE`: the schedule is interrupted, snapshots won't be created + * or deleted. * - * When schedule os disabled snapshots will not be created or deleted according to retention policy. + * The schedule is disabled only after all snapshot creations and deletions triggered by the schedule are completed. */ disable( request: DisableSnapshotScheduleRequest, @@ -2944,7 +3121,12 @@ export interface SnapshotScheduleServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Enable schedule sets status Active. 
*/ + /** + * Enables the specified snapshot schedule. + * + * The [SnapshotSchedule.status] is changed to `ACTIVE`: new disk snapshots will be created, old ones deleted + * (if [SnapshotSchedule.retention_policy] is specified). + */ enable( request: EnableSnapshotScheduleRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2985,7 +3167,7 @@ export interface SnapshotScheduleServiceClient extends Client { response: ListSnapshotScheduleOperationsResponse ) => void ): ClientUnaryCall; - /** List snapshot created by schedule. */ + /** Retrieves the list of snapshots created by the specified snapshot schedule. */ listSnapshots( request: ListSnapshotScheduleSnapshotsRequest, callback: ( @@ -3010,7 +3192,7 @@ export interface SnapshotScheduleServiceClient extends Client { response: ListSnapshotScheduleSnapshotsResponse ) => void ): ClientUnaryCall; - /** List disks that belong to schedule. */ + /** Retrieves the list of disks attached to the specified snapshot schedule. */ listDisks( request: ListSnapshotScheduleDisksRequest, callback: ( diff --git a/src/generated/yandex/cloud/compute/v1/snapshot_service.ts b/src/generated/yandex/cloud/compute/v1/snapshot_service.ts index 00812c1d..03d3aa03 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot_service.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot_service.ts @@ -50,12 +50,21 @@ export interface ListSnapshotsRequest { pageToken: string; /** * A filter expression that filters resources listed in the response. - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [Snapshot.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{,61}[a-z0-9])?`. + * The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. + * + * Each condition has the form ` `, where: + * 1. `` is the field name. 
Currently you can use filtering only on the limited number of fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * String values should be written in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). */ filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. + * The default sorting order is ascending + */ + orderBy: string; } export interface ListSnapshotsResponse { @@ -253,6 +262,7 @@ const baseListSnapshotsRequest: object = { pageSize: 0, pageToken: "", filter: "", + orderBy: "", }; export const ListSnapshotsRequest = { @@ -274,6 +284,9 @@ export const ListSnapshotsRequest = { if (message.filter !== "") { writer.uint32(34).string(message.filter); } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } return writer; }, @@ -299,6 +312,9 @@ export const ListSnapshotsRequest = { case 4: message.filter = reader.string(); break; + case 5: + message.orderBy = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -325,6 +341,10 @@ export const ListSnapshotsRequest = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; return message; }, @@ -335,6 +355,7 @@ export const ListSnapshotsRequest = { (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); return obj; }, @@ -346,6 +367,7 @@ export const ListSnapshotsRequest = { message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; message.filter = object.filter ?? 
""; + message.orderBy = object.orderBy ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/containerregistry/index.ts b/src/generated/yandex/cloud/containerregistry/index.ts index a83bbb3f..a185598e 100644 --- a/src/generated/yandex/cloud/containerregistry/index.ts +++ b/src/generated/yandex/cloud/containerregistry/index.ts @@ -8,5 +8,7 @@ export * as registry from './v1/registry' export * as registry_service from './v1/registry_service' export * as repository from './v1/repository' export * as repository_service from './v1/repository_service' +export * as scan_policy from './v1/scan_policy' +export * as scan_policy_service from './v1/scan_policy_service' export * as scanner from './v1/scanner' export * as scanner_service from './v1/scanner_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/containerregistry/v1/image.ts b/src/generated/yandex/cloud/containerregistry/v1/image.ts index 8c6c4acd..902ea411 100644 --- a/src/generated/yandex/cloud/containerregistry/v1/image.ts +++ b/src/generated/yandex/cloud/containerregistry/v1/image.ts @@ -7,7 +7,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.containerregistry.v1"; -/** An Image resource. For more information, see [Docker image](/docs/cloud/container-registry/docker-image). */ +/** An Image resource. For more information, see [Docker image](/docs/container-registry/concepts/docker-image). */ export interface Image { $type: "yandex.cloud.containerregistry.v1.Image"; /** Output only. ID of the Docker image. 
*/ diff --git a/src/generated/yandex/cloud/containerregistry/v1/repository.ts b/src/generated/yandex/cloud/containerregistry/v1/repository.ts index a44aaf52..ff4b532d 100644 --- a/src/generated/yandex/cloud/containerregistry/v1/repository.ts +++ b/src/generated/yandex/cloud/containerregistry/v1/repository.ts @@ -5,7 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.containerregistry.v1"; -/** A Repository resource. For more information, see [Repository](/docs/cloud/container-registry/repository). */ +/** A Repository resource. For more information, see [Repository](/docs/container-registry/concepts/repository). */ export interface Repository { $type: "yandex.cloud.containerregistry.v1.Repository"; /** diff --git a/src/generated/yandex/cloud/containerregistry/v1/scan_policy.ts b/src/generated/yandex/cloud/containerregistry/v1/scan_policy.ts new file mode 100644 index 00000000..dc236747 --- /dev/null +++ b/src/generated/yandex/cloud/containerregistry/v1/scan_policy.ts @@ -0,0 +1,521 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Duration } from "../../../../google/protobuf/duration"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.containerregistry.v1"; + +export interface ScanPolicy { + $type: "yandex.cloud.containerregistry.v1.ScanPolicy"; + /** Output only. ID of the scan policy. */ + id: string; + /** + * ID of the registry that the scan policy belongs to. + * Required. The maximum string length in characters is 50. + */ + registryId: string; + /** Name of the scan policy. */ + name: string; + /** + * Description of the scan policy. + * The maximum string length in characters is 256. + */ + description: string; + /** The rules of scan policy. */ + rules?: ScanRules; + /** Output only. Creation timestamp. 
*/ + createdAt?: Date; + /** Turns off scan policy. */ + disabled: boolean; +} + +export interface ScanRules { + $type: "yandex.cloud.containerregistry.v1.ScanRules"; + /** Description of on-push scan rule. */ + pushRule?: PushRule; + /** Description of time based rescan rule. */ + scheduleRules: ScheduledRule[]; +} + +export interface PushRule { + $type: "yandex.cloud.containerregistry.v1.PushRule"; + /** List of repositories that are scanned with rule. Child repositories are included into parent node. "*" - means all repositories in registry */ + repositoryPrefixes: string[]; + /** Turns off scan rule. */ + disabled: boolean; +} + +export interface ScheduledRule { + $type: "yandex.cloud.containerregistry.v1.ScheduledRule"; + /** List of repositories that are scanned with rule. Child repositories are included into parent node. "*" - means all repositories in registry */ + repositoryPrefixes: string[]; + /** Period of time since last scan to trigger automatic rescan. */ + rescanPeriod?: Duration; + /** Turns off scan rule. 
*/ + disabled: boolean; +} + +const baseScanPolicy: object = { + $type: "yandex.cloud.containerregistry.v1.ScanPolicy", + id: "", + registryId: "", + name: "", + description: "", + disabled: false, +}; + +export const ScanPolicy = { + $type: "yandex.cloud.containerregistry.v1.ScanPolicy" as const, + + encode( + message: ScanPolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.registryId !== "") { + writer.uint32(18).string(message.registryId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + if (message.rules !== undefined) { + ScanRules.encode(message.rules, writer.uint32(42).fork()).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(50).fork() + ).ldelim(); + } + if (message.disabled === true) { + writer.uint32(56).bool(message.disabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ScanPolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseScanPolicy } as ScanPolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.registryId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.rules = ScanRules.decode(reader, reader.uint32()); + break; + case 6: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 7: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ScanPolicy { + const message = { ...baseScanPolicy } as ScanPolicy; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? ScanRules.fromJSON(object.rules) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? 
Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: ScanPolicy): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.registryId !== undefined && (obj.registryId = message.registryId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.rules !== undefined && + (obj.rules = message.rules ? ScanRules.toJSON(message.rules) : undefined); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): ScanPolicy { + const message = { ...baseScanPolicy } as ScanPolicy; + message.id = object.id ?? ""; + message.registryId = object.registryId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? ScanRules.fromPartial(object.rules) + : undefined; + message.createdAt = object.createdAt ?? undefined; + message.disabled = object.disabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(ScanPolicy.$type, ScanPolicy); + +const baseScanRules: object = { + $type: "yandex.cloud.containerregistry.v1.ScanRules", +}; + +export const ScanRules = { + $type: "yandex.cloud.containerregistry.v1.ScanRules" as const, + + encode( + message: ScanRules, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.pushRule !== undefined) { + PushRule.encode(message.pushRule, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.scheduleRules) { + ScheduledRule.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ScanRules { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseScanRules } as ScanRules; + message.scheduleRules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pushRule = PushRule.decode(reader, reader.uint32()); + break; + case 2: + message.scheduleRules.push( + ScheduledRule.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ScanRules { + const message = { ...baseScanRules } as ScanRules; + message.pushRule = + object.pushRule !== undefined && object.pushRule !== null + ? PushRule.fromJSON(object.pushRule) + : undefined; + message.scheduleRules = (object.scheduleRules ?? []).map((e: any) => + ScheduledRule.fromJSON(e) + ); + return message; + }, + + toJSON(message: ScanRules): unknown { + const obj: any = {}; + message.pushRule !== undefined && + (obj.pushRule = message.pushRule + ? PushRule.toJSON(message.pushRule) + : undefined); + if (message.scheduleRules) { + obj.scheduleRules = message.scheduleRules.map((e) => + e ? ScheduledRule.toJSON(e) : undefined + ); + } else { + obj.scheduleRules = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ScanRules { + const message = { ...baseScanRules } as ScanRules; + message.pushRule = + object.pushRule !== undefined && object.pushRule !== null + ? 
PushRule.fromPartial(object.pushRule) + : undefined; + message.scheduleRules = + object.scheduleRules?.map((e) => ScheduledRule.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ScanRules.$type, ScanRules); + +const basePushRule: object = { + $type: "yandex.cloud.containerregistry.v1.PushRule", + repositoryPrefixes: "", + disabled: false, +}; + +export const PushRule = { + $type: "yandex.cloud.containerregistry.v1.PushRule" as const, + + encode( + message: PushRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.repositoryPrefixes) { + writer.uint32(10).string(v!); + } + if (message.disabled === true) { + writer.uint32(16).bool(message.disabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PushRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePushRule } as PushRule; + message.repositoryPrefixes = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.repositoryPrefixes.push(reader.string()); + break; + case 2: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PushRule { + const message = { ...basePushRule } as PushRule; + message.repositoryPrefixes = (object.repositoryPrefixes ?? []).map( + (e: any) => String(e) + ); + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? 
Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: PushRule): unknown { + const obj: any = {}; + if (message.repositoryPrefixes) { + obj.repositoryPrefixes = message.repositoryPrefixes.map((e) => e); + } else { + obj.repositoryPrefixes = []; + } + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>(object: I): PushRule { + const message = { ...basePushRule } as PushRule; + message.repositoryPrefixes = object.repositoryPrefixes?.map((e) => e) || []; + message.disabled = object.disabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(PushRule.$type, PushRule); + +const baseScheduledRule: object = { + $type: "yandex.cloud.containerregistry.v1.ScheduledRule", + repositoryPrefixes: "", + disabled: false, +}; + +export const ScheduledRule = { + $type: "yandex.cloud.containerregistry.v1.ScheduledRule" as const, + + encode( + message: ScheduledRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.repositoryPrefixes) { + writer.uint32(10).string(v!); + } + if (message.rescanPeriod !== undefined) { + Duration.encode(message.rescanPeriod, writer.uint32(18).fork()).ldelim(); + } + if (message.disabled === true) { + writer.uint32(24).bool(message.disabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ScheduledRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseScheduledRule } as ScheduledRule; + message.repositoryPrefixes = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.repositoryPrefixes.push(reader.string()); + break; + case 2: + message.rescanPeriod = Duration.decode(reader, reader.uint32()); + break; + case 3: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ScheduledRule { + const message = { ...baseScheduledRule } as ScheduledRule; + message.repositoryPrefixes = (object.repositoryPrefixes ?? []).map( + (e: any) => String(e) + ); + message.rescanPeriod = + object.rescanPeriod !== undefined && object.rescanPeriod !== null + ? Duration.fromJSON(object.rescanPeriod) + : undefined; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: ScheduledRule): unknown { + const obj: any = {}; + if (message.repositoryPrefixes) { + obj.repositoryPrefixes = message.repositoryPrefixes.map((e) => e); + } else { + obj.repositoryPrefixes = []; + } + message.rescanPeriod !== undefined && + (obj.rescanPeriod = message.rescanPeriod + ? Duration.toJSON(message.rescanPeriod) + : undefined); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): ScheduledRule { + const message = { ...baseScheduledRule } as ScheduledRule; + message.repositoryPrefixes = object.repositoryPrefixes?.map((e) => e) || []; + message.rescanPeriod = + object.rescanPeriod !== undefined && object.rescanPeriod !== null + ? Duration.fromPartial(object.rescanPeriod) + : undefined; + message.disabled = object.disabled ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(ScheduledRule.$type, ScheduledRule); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/containerregistry/v1/scan_policy_service.ts b/src/generated/yandex/cloud/containerregistry/v1/scan_policy_service.ts new file mode 100644 index 00000000..fab56599 --- /dev/null +++ b/src/generated/yandex/cloud/containerregistry/v1/scan_policy_service.ts @@ -0,0 +1,979 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + ScanRules, + ScanPolicy, +} from 
"../../../../yandex/cloud/containerregistry/v1/scan_policy"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.containerregistry.v1"; + +export interface GetScanPolicyRequest { + $type: "yandex.cloud.containerregistry.v1.GetScanPolicyRequest"; + /** ID of the scan policy. */ + scanPolicyId: string; +} + +export interface GetScanPolicyByRegistryRequest { + $type: "yandex.cloud.containerregistry.v1.GetScanPolicyByRegistryRequest"; + /** ID of the registry with scan policy. */ + registryId: string; +} + +export interface CreateScanPolicyRequest { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyRequest"; + /** ID of the scan policy registry. */ + registryId: string; + /** Name of the scan policy. */ + name: string; + /** Description of the scan policy. */ + description: string; + /** Rules of the scan policy. */ + rules?: ScanRules; +} + +export interface UpdateScanPolicyRequest { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyRequest"; + /** ID of the scan policy. */ + scanPolicyId: string; + /** Field mask that specifies which fields of the scan policy resource are going to be updated. */ + updateMask?: FieldMask; + /** Name of the scan policy. */ + name: string; + /** Description of the scan policy. */ + description: string; + /** Rules of the scan policy. */ + rules?: ScanRules; +} + +export interface DeleteScanPolicyRequest { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyRequest"; + /** ID of the scan policy. */ + scanPolicyId: string; +} + +export interface CreateScanPolicyMetadata { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyMetadata"; + /** ID of created scan policy resource. 
*/ + scanPolicyId: string; +} + +export interface UpdateScanPolicyMetadata { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyMetadata"; + /** ID of the scan policy resource that is updated. */ + scanPolicyId: string; +} + +export interface DeleteScanPolicyMetadata { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyMetadata"; + /** ID of the scan policy resource that is deleted. */ + scanPolicyId: string; +} + +const baseGetScanPolicyRequest: object = { + $type: "yandex.cloud.containerregistry.v1.GetScanPolicyRequest", + scanPolicyId: "", +}; + +export const GetScanPolicyRequest = { + $type: "yandex.cloud.containerregistry.v1.GetScanPolicyRequest" as const, + + encode( + message: GetScanPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetScanPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetScanPolicyRequest } as GetScanPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetScanPolicyRequest { + const message = { ...baseGetScanPolicyRequest } as GetScanPolicyRequest; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? 
String(object.scanPolicyId) + : ""; + return message; + }, + + toJSON(message: GetScanPolicyRequest): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetScanPolicyRequest { + const message = { ...baseGetScanPolicyRequest } as GetScanPolicyRequest; + message.scanPolicyId = object.scanPolicyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetScanPolicyRequest.$type, GetScanPolicyRequest); + +const baseGetScanPolicyByRegistryRequest: object = { + $type: "yandex.cloud.containerregistry.v1.GetScanPolicyByRegistryRequest", + registryId: "", +}; + +export const GetScanPolicyByRegistryRequest = { + $type: + "yandex.cloud.containerregistry.v1.GetScanPolicyByRegistryRequest" as const, + + encode( + message: GetScanPolicyByRegistryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetScanPolicyByRegistryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseGetScanPolicyByRegistryRequest, + } as GetScanPolicyByRegistryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetScanPolicyByRegistryRequest { + const message = { + ...baseGetScanPolicyByRegistryRequest, + } as GetScanPolicyByRegistryRequest; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? 
String(object.registryId) + : ""; + return message; + }, + + toJSON(message: GetScanPolicyByRegistryRequest): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetScanPolicyByRegistryRequest { + const message = { + ...baseGetScanPolicyByRegistryRequest, + } as GetScanPolicyByRegistryRequest; + message.registryId = object.registryId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetScanPolicyByRegistryRequest.$type, + GetScanPolicyByRegistryRequest +); + +const baseCreateScanPolicyRequest: object = { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyRequest", + registryId: "", + name: "", + description: "", +}; + +export const CreateScanPolicyRequest = { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyRequest" as const, + + encode( + message: CreateScanPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + if (message.rules !== undefined) { + ScanRules.encode(message.rules, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateScanPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateScanPolicyRequest, + } as CreateScanPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + message.rules = ScanRules.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateScanPolicyRequest { + const message = { + ...baseCreateScanPolicyRequest, + } as CreateScanPolicyRequest; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? ScanRules.fromJSON(object.rules) + : undefined; + return message; + }, + + toJSON(message: CreateScanPolicyRequest): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.rules !== undefined && + (obj.rules = message.rules ? ScanRules.toJSON(message.rules) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateScanPolicyRequest { + const message = { + ...baseCreateScanPolicyRequest, + } as CreateScanPolicyRequest; + message.registryId = object.registryId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? 
ScanRules.fromPartial(object.rules) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateScanPolicyRequest.$type, CreateScanPolicyRequest); + +const baseUpdateScanPolicyRequest: object = { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyRequest", + scanPolicyId: "", + name: "", + description: "", +}; + +export const UpdateScanPolicyRequest = { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyRequest" as const, + + encode( + message: UpdateScanPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + if (message.rules !== undefined) { + ScanRules.encode(message.rules, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateScanPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateScanPolicyRequest, + } as UpdateScanPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.rules = ScanRules.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateScanPolicyRequest { + const message = { + ...baseUpdateScanPolicyRequest, + } as UpdateScanPolicyRequest; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? String(object.scanPolicyId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? ScanRules.fromJSON(object.rules) + : undefined; + return message; + }, + + toJSON(message: UpdateScanPolicyRequest): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.rules !== undefined && + (obj.rules = message.rules ? 
ScanRules.toJSON(message.rules) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateScanPolicyRequest { + const message = { + ...baseUpdateScanPolicyRequest, + } as UpdateScanPolicyRequest; + message.scanPolicyId = object.scanPolicyId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.rules = + object.rules !== undefined && object.rules !== null + ? ScanRules.fromPartial(object.rules) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateScanPolicyRequest.$type, UpdateScanPolicyRequest); + +const baseDeleteScanPolicyRequest: object = { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyRequest", + scanPolicyId: "", +}; + +export const DeleteScanPolicyRequest = { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyRequest" as const, + + encode( + message: DeleteScanPolicyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteScanPolicyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteScanPolicyRequest, + } as DeleteScanPolicyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteScanPolicyRequest { + const message = { + ...baseDeleteScanPolicyRequest, + } as DeleteScanPolicyRequest; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? String(object.scanPolicyId) + : ""; + return message; + }, + + toJSON(message: DeleteScanPolicyRequest): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteScanPolicyRequest { + const message = { + ...baseDeleteScanPolicyRequest, + } as DeleteScanPolicyRequest; + message.scanPolicyId = object.scanPolicyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteScanPolicyRequest.$type, DeleteScanPolicyRequest); + +const baseCreateScanPolicyMetadata: object = { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyMetadata", + scanPolicyId: "", +}; + +export const CreateScanPolicyMetadata = { + $type: "yandex.cloud.containerregistry.v1.CreateScanPolicyMetadata" as const, + + encode( + message: CreateScanPolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateScanPolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateScanPolicyMetadata, + } as CreateScanPolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateScanPolicyMetadata { + const message = { + ...baseCreateScanPolicyMetadata, + } as CreateScanPolicyMetadata; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? String(object.scanPolicyId) + : ""; + return message; + }, + + toJSON(message: CreateScanPolicyMetadata): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateScanPolicyMetadata { + const message = { + ...baseCreateScanPolicyMetadata, + } as CreateScanPolicyMetadata; + message.scanPolicyId = object.scanPolicyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateScanPolicyMetadata.$type, + CreateScanPolicyMetadata +); + +const baseUpdateScanPolicyMetadata: object = { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyMetadata", + scanPolicyId: "", +}; + +export const UpdateScanPolicyMetadata = { + $type: "yandex.cloud.containerregistry.v1.UpdateScanPolicyMetadata" as const, + + encode( + message: UpdateScanPolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateScanPolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateScanPolicyMetadata, + } as UpdateScanPolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateScanPolicyMetadata { + const message = { + ...baseUpdateScanPolicyMetadata, + } as UpdateScanPolicyMetadata; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? String(object.scanPolicyId) + : ""; + return message; + }, + + toJSON(message: UpdateScanPolicyMetadata): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateScanPolicyMetadata { + const message = { + ...baseUpdateScanPolicyMetadata, + } as UpdateScanPolicyMetadata; + message.scanPolicyId = object.scanPolicyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateScanPolicyMetadata.$type, + UpdateScanPolicyMetadata +); + +const baseDeleteScanPolicyMetadata: object = { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyMetadata", + scanPolicyId: "", +}; + +export const DeleteScanPolicyMetadata = { + $type: "yandex.cloud.containerregistry.v1.DeleteScanPolicyMetadata" as const, + + encode( + message: DeleteScanPolicyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.scanPolicyId !== "") { + writer.uint32(10).string(message.scanPolicyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteScanPolicyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteScanPolicyMetadata, + } as DeleteScanPolicyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scanPolicyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteScanPolicyMetadata { + const message = { + ...baseDeleteScanPolicyMetadata, + } as DeleteScanPolicyMetadata; + message.scanPolicyId = + object.scanPolicyId !== undefined && object.scanPolicyId !== null + ? String(object.scanPolicyId) + : ""; + return message; + }, + + toJSON(message: DeleteScanPolicyMetadata): unknown { + const obj: any = {}; + message.scanPolicyId !== undefined && + (obj.scanPolicyId = message.scanPolicyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteScanPolicyMetadata { + const message = { + ...baseDeleteScanPolicyMetadata, + } as DeleteScanPolicyMetadata; + message.scanPolicyId = object.scanPolicyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteScanPolicyMetadata.$type, + DeleteScanPolicyMetadata +); + +/** A set of methods for managing scan policy resources. */ +export const ScanPolicyServiceService = { + /** Returns the specified scan policy. */ + get: { + path: "/yandex.cloud.containerregistry.v1.ScanPolicyService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetScanPolicyRequest) => + Buffer.from(GetScanPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetScanPolicyRequest.decode(value), + responseSerialize: (value: ScanPolicy) => + Buffer.from(ScanPolicy.encode(value).finish()), + responseDeserialize: (value: Buffer) => ScanPolicy.decode(value), + }, + /** Returns scan policy for the registry if any exists. 
*/ + getByRegistry: { + path: "/yandex.cloud.containerregistry.v1.ScanPolicyService/GetByRegistry", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetScanPolicyByRegistryRequest) => + Buffer.from(GetScanPolicyByRegistryRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetScanPolicyByRegistryRequest.decode(value), + responseSerialize: (value: ScanPolicy) => + Buffer.from(ScanPolicy.encode(value).finish()), + responseDeserialize: (value: Buffer) => ScanPolicy.decode(value), + }, + /** Creates a scan policy for the specified registry. */ + create: { + path: "/yandex.cloud.containerregistry.v1.ScanPolicyService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateScanPolicyRequest) => + Buffer.from(CreateScanPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateScanPolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified scan policy. */ + update: { + path: "/yandex.cloud.containerregistry.v1.ScanPolicyService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateScanPolicyRequest) => + Buffer.from(UpdateScanPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateScanPolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified scan policy. 
*/ + delete: { + path: "/yandex.cloud.containerregistry.v1.ScanPolicyService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteScanPolicyRequest) => + Buffer.from(DeleteScanPolicyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteScanPolicyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ScanPolicyServiceServer extends UntypedServiceImplementation { + /** Returns the specified scan policy. */ + get: handleUnaryCall; + /** Returns scan policy for the registry if any exists. */ + getByRegistry: handleUnaryCall; + /** Creates a scan policy for the specified registry. */ + create: handleUnaryCall; + /** Updates the specified scan policy. */ + update: handleUnaryCall; + /** Deletes the specified scan policy. */ + delete: handleUnaryCall; +} + +export interface ScanPolicyServiceClient extends Client { + /** Returns the specified scan policy. */ + get( + request: GetScanPolicyRequest, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + get( + request: GetScanPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + get( + request: GetScanPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + /** Returns scan policy for the registry if any exists. 
*/ + getByRegistry( + request: GetScanPolicyByRegistryRequest, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + getByRegistry( + request: GetScanPolicyByRegistryRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + getByRegistry( + request: GetScanPolicyByRegistryRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ScanPolicy) => void + ): ClientUnaryCall; + /** Creates a scan policy for the specified registry. */ + create( + request: CreateScanPolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateScanPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateScanPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified scan policy. */ + update( + request: UpdateScanPolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateScanPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateScanPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified scan policy. 
*/ + delete( + request: DeleteScanPolicyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteScanPolicyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteScanPolicyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ScanPolicyServiceClient = makeGenericClientConstructor( + ScanPolicyServiceService, + "yandex.cloud.containerregistry.v1.ScanPolicyService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ScanPolicyServiceClient; + service: typeof ScanPolicyServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/dataproc/v1/cluster.ts b/src/generated/yandex/cloud/dataproc/v1/cluster.ts index c477b3da..910069bc 100644 --- a/src/generated/yandex/cloud/dataproc/v1/cluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/cluster.ts @@ -281,7 +281,7 @@ export interface ClusterConfig { $type: "yandex.cloud.dataproc.v1.ClusterConfig"; /** * Image version for cluster provisioning. - * All available versions are listed in the [documentation](/docs/managed-hadoop/concepts/image-versions). + * All available versions are listed in the [documentation](/docs/data-proc/concepts/environment). 
*/ versionId: string; /** Data Proc specific configuration options. */ diff --git a/src/generated/yandex/cloud/dataproc/v1/cluster_service.ts b/src/generated/yandex/cloud/dataproc/v1/cluster_service.ts index a56c0650..4a116687 100644 --- a/src/generated/yandex/cloud/dataproc/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/dataproc/v1/cluster_service.ts @@ -128,7 +128,7 @@ export interface CreateClusterConfigSpec { /** * Version of the image for cluster provisioning. * - * All available versions are listed in the [documentation](/docs/data-proc/concepts/image-versions). + * All available versions are listed in the [documentation](/docs/data-proc/concepts/environment). */ versionId: string; /** Data Proc specific options. */ diff --git a/src/generated/yandex/cloud/dataproc/v1/job.ts b/src/generated/yandex/cloud/dataproc/v1/job.ts index f67c15b6..888b4626 100644 --- a/src/generated/yandex/cloud/dataproc/v1/job.ts +++ b/src/generated/yandex/cloud/dataproc/v1/job.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.dataproc.v1"; -/** A Data Proc job. For details about the concept, see [documentation](/docs/dataproc/concepts/jobs). */ +/** A Data Proc job. For details about the concept, see [documentation](/docs/data-proc/concepts/jobs). */ export interface Job { $type: "yandex.cloud.dataproc.v1.Job"; /** ID of the job. Generated at creation time. 
*/ diff --git a/src/generated/yandex/cloud/datasphere/index.ts b/src/generated/yandex/cloud/datasphere/index.ts index f38a1d15..39b19444 100644 --- a/src/generated/yandex/cloud/datasphere/index.ts +++ b/src/generated/yandex/cloud/datasphere/index.ts @@ -1,6 +1,12 @@ export * as app_token_service from './v1/app_token_service' export * as folder_budget_service from './v1/folder_budget_service' +export * as node_execution_error_details from './v1/node_execution_error_details' export * as node_service from './v1/node_service' -export * as project from './v1/project' export * as project_data_service from './v1/project_data_service' -export * as project_service from './v1/project_service' \ No newline at end of file +export * as community from './v2/community' +export * as community_service from './v2/community_service' +export * as dataset from './v2/dataset' +export * as project from './v2/project' +export * as project_service from './v2/project_service' +export * as secret from './v2/secret' +export * as user from './v2/user' diff --git a/src/generated/yandex/cloud/datasphere/v1/node_execution_error_details.ts b/src/generated/yandex/cloud/datasphere/v1/node_execution_error_details.ts new file mode 100644 index 00000000..30d62e38 --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v1/node_execution_error_details.ts @@ -0,0 +1,152 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.datasphere.v1"; + +/** User code python execution's error details */ +export interface NodeExecutionErrorDetails { + $type: "yandex.cloud.datasphere.v1.NodeExecutionErrorDetails"; + /** Error name */ + errorName: string; + /** Error message */ + errorMessage: string; + /** Error traceback */ + traceback: string[]; +} + +const baseNodeExecutionErrorDetails: object = { + $type: "yandex.cloud.datasphere.v1.NodeExecutionErrorDetails", + 
errorName: "", + errorMessage: "", + traceback: "", +}; + +export const NodeExecutionErrorDetails = { + $type: "yandex.cloud.datasphere.v1.NodeExecutionErrorDetails" as const, + + encode( + message: NodeExecutionErrorDetails, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.errorName !== "") { + writer.uint32(10).string(message.errorName); + } + if (message.errorMessage !== "") { + writer.uint32(18).string(message.errorMessage); + } + for (const v of message.traceback) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): NodeExecutionErrorDetails { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseNodeExecutionErrorDetails, + } as NodeExecutionErrorDetails; + message.traceback = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.errorName = reader.string(); + break; + case 2: + message.errorMessage = reader.string(); + break; + case 3: + message.traceback.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): NodeExecutionErrorDetails { + const message = { + ...baseNodeExecutionErrorDetails, + } as NodeExecutionErrorDetails; + message.errorName = + object.errorName !== undefined && object.errorName !== null + ? String(object.errorName) + : ""; + message.errorMessage = + object.errorMessage !== undefined && object.errorMessage !== null + ? String(object.errorMessage) + : ""; + message.traceback = (object.traceback ?? 
[]).map((e: any) => String(e)); + return message; + }, + + toJSON(message: NodeExecutionErrorDetails): unknown { + const obj: any = {}; + message.errorName !== undefined && (obj.errorName = message.errorName); + message.errorMessage !== undefined && + (obj.errorMessage = message.errorMessage); + if (message.traceback) { + obj.traceback = message.traceback.map((e) => e); + } else { + obj.traceback = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): NodeExecutionErrorDetails { + const message = { + ...baseNodeExecutionErrorDetails, + } as NodeExecutionErrorDetails; + message.errorName = object.errorName ?? ""; + message.errorMessage = object.errorMessage ?? ""; + message.traceback = object.traceback?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + NodeExecutionErrorDetails.$type, + NodeExecutionErrorDetails +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v1/node_service.ts b/src/generated/yandex/cloud/datasphere/v1/node_service.ts index 8e47198d..e2564e2c 100644 --- a/src/generated/yandex/cloud/datasphere/v1/node_service.ts +++ b/src/generated/yandex/cloud/datasphere/v1/node_service.ts @@ -34,6 +34,22 @@ export interface NodeExecutionResponse { output?: { [key: string]: any }; } +export interface AliasExecutionRequest { + $type: "yandex.cloud.datasphere.v1.AliasExecutionRequest"; + /** ID of the folder that will be matched with Alias ACL */ + folderId: string; + /** Name of the Alias to perform request on */ + aliasName: string; + /** Input data for the execution */ + input?: { [key: string]: any }; +} + +export interface AliasExecutionResponse { + $type: "yandex.cloud.datasphere.v1.AliasExecutionResponse"; + /** Result of the execution */ + output?: { [key: string]: any }; +} + const baseNodeExecutionRequest: object = { $type: "yandex.cloud.datasphere.v1.NodeExecutionRequest", folderId: "", @@ -191,6 +207,163 @@ export const NodeExecutionResponse = { messageTypeRegistry.set(NodeExecutionResponse.$type, NodeExecutionResponse); +const baseAliasExecutionRequest: object = { + $type: "yandex.cloud.datasphere.v1.AliasExecutionRequest", + folderId: "", + aliasName: "", +}; + +export const AliasExecutionRequest = { + $type: "yandex.cloud.datasphere.v1.AliasExecutionRequest" as const, + + encode( + message: AliasExecutionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.aliasName !== "") { + writer.uint32(18).string(message.aliasName); + } + if (message.input !== undefined) { + Struct.encode( + Struct.wrap(message.input), + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + 
decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AliasExecutionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAliasExecutionRequest } as AliasExecutionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.aliasName = reader.string(); + break; + case 3: + message.input = Struct.unwrap(Struct.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AliasExecutionRequest { + const message = { ...baseAliasExecutionRequest } as AliasExecutionRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.aliasName = + object.aliasName !== undefined && object.aliasName !== null + ? String(object.aliasName) + : ""; + message.input = typeof object.input === "object" ? object.input : undefined; + return message; + }, + + toJSON(message: AliasExecutionRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.aliasName !== undefined && (obj.aliasName = message.aliasName); + message.input !== undefined && (obj.input = message.input); + return obj; + }, + + fromPartial, I>>( + object: I + ): AliasExecutionRequest { + const message = { ...baseAliasExecutionRequest } as AliasExecutionRequest; + message.folderId = object.folderId ?? ""; + message.aliasName = object.aliasName ?? ""; + message.input = object.input ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(AliasExecutionRequest.$type, AliasExecutionRequest); + +const baseAliasExecutionResponse: object = { + $type: "yandex.cloud.datasphere.v1.AliasExecutionResponse", +}; + +export const AliasExecutionResponse = { + $type: "yandex.cloud.datasphere.v1.AliasExecutionResponse" as const, + + encode( + message: AliasExecutionResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.output !== undefined) { + Struct.encode( + Struct.wrap(message.output), + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AliasExecutionResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAliasExecutionResponse } as AliasExecutionResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.output = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AliasExecutionResponse { + const message = { ...baseAliasExecutionResponse } as AliasExecutionResponse; + message.output = + typeof object.output === "object" ? object.output : undefined; + return message; + }, + + toJSON(message: AliasExecutionResponse): unknown { + const obj: any = {}; + message.output !== undefined && (obj.output = message.output); + return obj; + }, + + fromPartial, I>>( + object: I + ): AliasExecutionResponse { + const message = { ...baseAliasExecutionResponse } as AliasExecutionResponse; + message.output = object.output ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(AliasExecutionResponse.$type, AliasExecutionResponse); + /** A set of methods for managing Node resources. 
*/ export const NodeServiceService = { /** Executes deployed Node. */ @@ -205,11 +378,26 @@ export const NodeServiceService = { Buffer.from(NodeExecutionResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => NodeExecutionResponse.decode(value), }, + /** Executes NodeAlias requests. */ + executeAlias: { + path: "/yandex.cloud.datasphere.v1.NodeService/ExecuteAlias", + requestStream: false, + responseStream: false, + requestSerialize: (value: AliasExecutionRequest) => + Buffer.from(AliasExecutionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => AliasExecutionRequest.decode(value), + responseSerialize: (value: AliasExecutionResponse) => + Buffer.from(AliasExecutionResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AliasExecutionResponse.decode(value), + }, } as const; export interface NodeServiceServer extends UntypedServiceImplementation { /** Executes deployed Node. */ execute: handleUnaryCall; + /** Executes NodeAlias requests. */ + executeAlias: handleUnaryCall; } export interface NodeServiceClient extends Client { @@ -238,6 +426,31 @@ export interface NodeServiceClient extends Client { response: NodeExecutionResponse ) => void ): ClientUnaryCall; + /** Executes NodeAlias requests. 
*/ + executeAlias( + request: AliasExecutionRequest, + callback: ( + error: ServiceError | null, + response: AliasExecutionResponse + ) => void + ): ClientUnaryCall; + executeAlias( + request: AliasExecutionRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AliasExecutionResponse + ) => void + ): ClientUnaryCall; + executeAlias( + request: AliasExecutionRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AliasExecutionResponse + ) => void + ): ClientUnaryCall; } export const NodeServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/datasphere/v2/community.ts b/src/generated/yandex/cloud/datasphere/v2/community.ts new file mode 100644 index 00000000..6dc88c51 --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/community.ts @@ -0,0 +1,331 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export interface Community { + $type: "yandex.cloud.datasphere.v2.Community"; + /** ID of the community. */ + id: string; + /** Time when community was created. */ + createdAt?: Date; + /** Name of the community. */ + name: string; + /** Description of the comminuty. */ + description: string; + /** Labels of the community. */ + labels: { [key: string]: string }; + /** ID of the user who created the community. */ + createdById: string; + /** ID of the organization to which community belongs. 
*/ + organizationId: string; +} + +export interface Community_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.Community.LabelsEntry"; + key: string; + value: string; +} + +const baseCommunity: object = { + $type: "yandex.cloud.datasphere.v2.Community", + id: "", + name: "", + description: "", + createdById: "", + organizationId: "", +}; + +export const Community = { + $type: "yandex.cloud.datasphere.v2.Community" as const, + + encode( + message: Community, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Community_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.Community.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.createdById !== "") { + writer.uint32(50).string(message.createdById); + } + if (message.organizationId !== "") { + writer.uint32(82).string(message.organizationId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Community { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCommunity } as Community; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = Community_LabelsEntry.decode(reader, reader.uint32()); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.createdById = reader.string(); + break; + case 10: + message.organizationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Community { + const message = { ...baseCommunity } as Community; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.createdById = + object.createdById !== undefined && object.createdById !== null + ? String(object.createdById) + : ""; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? 
String(object.organizationId) + : ""; + return message; + }, + + toJSON(message: Community): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.createdById !== undefined && + (obj.createdById = message.createdById); + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): Community { + const message = { ...baseCommunity } as Community; + message.id = object.id ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.createdById = object.createdById ?? ""; + message.organizationId = object.organizationId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Community.$type, Community); + +const baseCommunity_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.Community.LabelsEntry", + key: "", + value: "", +}; + +export const Community_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.Community.LabelsEntry" as const, + + encode( + message: Community_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Community_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCommunity_LabelsEntry } as Community_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Community_LabelsEntry { + const message = { ...baseCommunity_LabelsEntry } as Community_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Community_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Community_LabelsEntry { + const message = { ...baseCommunity_LabelsEntry } as Community_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Community_LabelsEntry.$type, Community_LabelsEntry); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/community_service.ts b/src/generated/yandex/cloud/datasphere/v2/community_service.ts new file mode 100644 index 00000000..ec00b284 --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/community_service.ts @@ -0,0 +1,1682 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { 
Community } from "../../../../yandex/cloud/datasphere/v2/community"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export interface CreateCommunityRequest { + $type: "yandex.cloud.datasphere.v2.CreateCommunityRequest"; + /** Name of the community. */ + name: string; + /** Description of the community. */ + description: string; + /** ID of the organization where community should be created. */ + organizationId: string; + /** ID of the billing account for the created community. Optional, billing account could be bound to community later. */ + billingAccountId: string; + /** Labels of the community. */ + labels: { [key: string]: string }; +} + +export interface CreateCommunityRequest_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.CreateCommunityRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateCommunityMetadata { + $type: "yandex.cloud.datasphere.v2.CreateCommunityMetadata"; + /** ID of the community that is being created. */ + communityId: string; +} + +export interface GetCommunityRequest { + $type: "yandex.cloud.datasphere.v2.GetCommunityRequest"; + /** ID of the community. */ + communityId: string; +} + +export interface UpdateCommunityRequest { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityRequest"; + /** ID of the community. */ + communityId: string; + /** Field mask that specifies which fields of the Community resource are going to be updated. */ + updateMask?: FieldMask; + /** Name of the community. */ + name: string; + /** Description of the community. */ + description: string; + /** Labels of the community. 
*/ + labels: { [key: string]: string }; +} + +export interface UpdateCommunityRequest_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateCommunityMetadata { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityMetadata"; + /** ID of the community that is being updated. */ + communityId: string; +} + +export interface DeleteCommunityRequest { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityRequest"; + /** ID of the community. */ + communityId: string; +} + +export interface DeleteCommunityMetadata { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityMetadata"; + /** ID of the community that is being deleted. */ + communityId: string; +} + +export interface ListCommunitiesRequest { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesRequest"; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], + * the service returns a [ListCommunitiesResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListCommunitiesResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * Community name or description pattern. + * Only communities with names or descriptions matching specified pattern will be returned. + */ + nameOrDescriptionPattern: string; + /** ID of the user. Only communities owned by specified user will be returned. */ + ownedById: string; + /** If set to true, only public communities will be returned. */ + listPublic: boolean; + /** ID of the organization to list communities in. */ + organizationId: string; +} + +export interface ListCommunitiesResponse { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesResponse"; + /** List of communities matching filters in list communities request. 
*/ + communities: Community[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListCommunitiesRequest.page_size], use + * the [next_page_token] as the value + * for the [ListCommunitiesRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface SetCommunityAccessBindingsMetadata { + $type: "yandex.cloud.datasphere.v2.SetCommunityAccessBindingsMetadata"; + /** ID of the community which access bindings are set. */ + communityId: string; +} + +export interface UpdateCommunityAccessBindingsMetadata { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityAccessBindingsMetadata"; + /** ID of the community which access bindings are updated. */ + communityId: string; +} + +const baseCreateCommunityRequest: object = { + $type: "yandex.cloud.datasphere.v2.CreateCommunityRequest", + name: "", + description: "", + organizationId: "", + billingAccountId: "", +}; + +export const CreateCommunityRequest = { + $type: "yandex.cloud.datasphere.v2.CreateCommunityRequest" as const, + + encode( + message: CreateCommunityRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + if (message.organizationId !== "") { + writer.uint32(26).string(message.organizationId); + } + if (message.billingAccountId !== "") { + writer.uint32(34).string(message.billingAccountId); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateCommunityRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.datasphere.v2.CreateCommunityRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + return writer; + }, + + decode( + input: 
_m0.Reader | Uint8Array, + length?: number + ): CreateCommunityRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateCommunityRequest } as CreateCommunityRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + case 3: + message.organizationId = reader.string(); + break; + case 4: + message.billingAccountId = reader.string(); + break; + case 5: + const entry5 = CreateCommunityRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateCommunityRequest { + const message = { ...baseCreateCommunityRequest } as CreateCommunityRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? String(object.organizationId) + : ""; + message.billingAccountId = + object.billingAccountId !== undefined && object.billingAccountId !== null + ? String(object.billingAccountId) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: CreateCommunityRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + message.billingAccountId !== undefined && + (obj.billingAccountId = message.billingAccountId); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateCommunityRequest { + const message = { ...baseCreateCommunityRequest } as CreateCommunityRequest; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.organizationId = object.organizationId ?? ""; + message.billingAccountId = object.billingAccountId ?? ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(CreateCommunityRequest.$type, CreateCommunityRequest); + +const baseCreateCommunityRequest_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.CreateCommunityRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateCommunityRequest_LabelsEntry = { + $type: + "yandex.cloud.datasphere.v2.CreateCommunityRequest.LabelsEntry" as const, + + encode( + message: CreateCommunityRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateCommunityRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateCommunityRequest_LabelsEntry, + } as CreateCommunityRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateCommunityRequest_LabelsEntry { + const message = { + ...baseCreateCommunityRequest_LabelsEntry, + } as CreateCommunityRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateCommunityRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateCommunityRequest_LabelsEntry { + const message = { + ...baseCreateCommunityRequest_LabelsEntry, + } as CreateCommunityRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateCommunityRequest_LabelsEntry.$type, + CreateCommunityRequest_LabelsEntry +); + +const baseCreateCommunityMetadata: object = { + $type: "yandex.cloud.datasphere.v2.CreateCommunityMetadata", + communityId: "", +}; + +export const CreateCommunityMetadata = { + $type: "yandex.cloud.datasphere.v2.CreateCommunityMetadata" as const, + + encode( + message: CreateCommunityMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateCommunityMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateCommunityMetadata, + } as CreateCommunityMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateCommunityMetadata { + const message = { + ...baseCreateCommunityMetadata, + } as CreateCommunityMetadata; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? 
String(object.communityId) + : ""; + return message; + }, + + toJSON(message: CreateCommunityMetadata): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateCommunityMetadata { + const message = { + ...baseCreateCommunityMetadata, + } as CreateCommunityMetadata; + message.communityId = object.communityId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateCommunityMetadata.$type, CreateCommunityMetadata); + +const baseGetCommunityRequest: object = { + $type: "yandex.cloud.datasphere.v2.GetCommunityRequest", + communityId: "", +}; + +export const GetCommunityRequest = { + $type: "yandex.cloud.datasphere.v2.GetCommunityRequest" as const, + + encode( + message: GetCommunityRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetCommunityRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetCommunityRequest } as GetCommunityRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetCommunityRequest { + const message = { ...baseGetCommunityRequest } as GetCommunityRequest; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? 
String(object.communityId) + : ""; + return message; + }, + + toJSON(message: GetCommunityRequest): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetCommunityRequest { + const message = { ...baseGetCommunityRequest } as GetCommunityRequest; + message.communityId = object.communityId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetCommunityRequest.$type, GetCommunityRequest); + +const baseUpdateCommunityRequest: object = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityRequest", + communityId: "", + name: "", + description: "", +}; + +export const UpdateCommunityRequest = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityRequest" as const, + + encode( + message: UpdateCommunityRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateCommunityRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.datasphere.v2.UpdateCommunityRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateCommunityRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateCommunityRequest } as UpdateCommunityRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateCommunityRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateCommunityRequest { + const message = { ...baseUpdateCommunityRequest } as UpdateCommunityRequest; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: UpdateCommunityRequest): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateCommunityRequest { + const message = { ...baseUpdateCommunityRequest } as UpdateCommunityRequest; + message.communityId = object.communityId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(UpdateCommunityRequest.$type, UpdateCommunityRequest); + +const baseUpdateCommunityRequest_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateCommunityRequest_LabelsEntry = { + $type: + "yandex.cloud.datasphere.v2.UpdateCommunityRequest.LabelsEntry" as const, + + encode( + message: UpdateCommunityRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateCommunityRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateCommunityRequest_LabelsEntry, + } as UpdateCommunityRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateCommunityRequest_LabelsEntry { + const message = { + ...baseUpdateCommunityRequest_LabelsEntry, + } as UpdateCommunityRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateCommunityRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateCommunityRequest_LabelsEntry { + const message = { + ...baseUpdateCommunityRequest_LabelsEntry, + } as UpdateCommunityRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateCommunityRequest_LabelsEntry.$type, + UpdateCommunityRequest_LabelsEntry +); + +const baseUpdateCommunityMetadata: object = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityMetadata", + communityId: "", +}; + +export const UpdateCommunityMetadata = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityMetadata" as const, + + encode( + message: UpdateCommunityMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateCommunityMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateCommunityMetadata, + } as UpdateCommunityMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateCommunityMetadata { + const message = { + ...baseUpdateCommunityMetadata, + } as UpdateCommunityMetadata; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + return message; + }, + + toJSON(message: UpdateCommunityMetadata): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateCommunityMetadata { + const message = { + ...baseUpdateCommunityMetadata, + } as UpdateCommunityMetadata; + message.communityId = object.communityId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateCommunityMetadata.$type, UpdateCommunityMetadata); + +const baseDeleteCommunityRequest: object = { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityRequest", + communityId: "", +}; + +export const DeleteCommunityRequest = { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityRequest" as const, + + encode( + message: DeleteCommunityRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteCommunityRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteCommunityRequest } as DeleteCommunityRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteCommunityRequest { + const message = { ...baseDeleteCommunityRequest } as DeleteCommunityRequest; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + return message; + }, + + toJSON(message: DeleteCommunityRequest): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteCommunityRequest { + const message = { ...baseDeleteCommunityRequest } as DeleteCommunityRequest; + message.communityId = object.communityId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteCommunityRequest.$type, DeleteCommunityRequest); + +const baseDeleteCommunityMetadata: object = { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityMetadata", + communityId: "", +}; + +export const DeleteCommunityMetadata = { + $type: "yandex.cloud.datasphere.v2.DeleteCommunityMetadata" as const, + + encode( + message: DeleteCommunityMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteCommunityMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteCommunityMetadata, + } as DeleteCommunityMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteCommunityMetadata { + const message = { + ...baseDeleteCommunityMetadata, + } as DeleteCommunityMetadata; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + return message; + }, + + toJSON(message: DeleteCommunityMetadata): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteCommunityMetadata { + const message = { + ...baseDeleteCommunityMetadata, + } as DeleteCommunityMetadata; + message.communityId = object.communityId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteCommunityMetadata.$type, DeleteCommunityMetadata); + +const baseListCommunitiesRequest: object = { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesRequest", + pageSize: 0, + pageToken: "", + nameOrDescriptionPattern: "", + ownedById: "", + listPublic: false, + organizationId: "", +}; + +export const ListCommunitiesRequest = { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesRequest" as const, + + encode( + message: ListCommunitiesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.pageSize !== 0) { + writer.uint32(8).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(18).string(message.pageToken); + } + if (message.nameOrDescriptionPattern !== "") { + writer.uint32(26).string(message.nameOrDescriptionPattern); + } + if (message.ownedById !== "") { + writer.uint32(42).string(message.ownedById); + } + if (message.listPublic === true) { + writer.uint32(48).bool(message.listPublic); + } + if (message.organizationId !== "") { + writer.uint32(58).string(message.organizationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListCommunitiesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListCommunitiesRequest } as ListCommunitiesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 2: + message.pageToken = reader.string(); + break; + case 3: + message.nameOrDescriptionPattern = reader.string(); + break; + case 5: + message.ownedById = reader.string(); + break; + case 6: + message.listPublic = reader.bool(); + break; + case 7: + message.organizationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListCommunitiesRequest { + const message = { ...baseListCommunitiesRequest } as ListCommunitiesRequest; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.nameOrDescriptionPattern = + object.nameOrDescriptionPattern !== undefined && + object.nameOrDescriptionPattern !== null + ? String(object.nameOrDescriptionPattern) + : ""; + message.ownedById = + object.ownedById !== undefined && object.ownedById !== null + ? String(object.ownedById) + : ""; + message.listPublic = + object.listPublic !== undefined && object.listPublic !== null + ? Boolean(object.listPublic) + : false; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? 
String(object.organizationId) + : ""; + return message; + }, + + toJSON(message: ListCommunitiesRequest): unknown { + const obj: any = {}; + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.nameOrDescriptionPattern !== undefined && + (obj.nameOrDescriptionPattern = message.nameOrDescriptionPattern); + message.ownedById !== undefined && (obj.ownedById = message.ownedById); + message.listPublic !== undefined && (obj.listPublic = message.listPublic); + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListCommunitiesRequest { + const message = { ...baseListCommunitiesRequest } as ListCommunitiesRequest; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.nameOrDescriptionPattern = object.nameOrDescriptionPattern ?? ""; + message.ownedById = object.ownedById ?? ""; + message.listPublic = object.listPublic ?? false; + message.organizationId = object.organizationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListCommunitiesRequest.$type, ListCommunitiesRequest); + +const baseListCommunitiesResponse: object = { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesResponse", + nextPageToken: "", +}; + +export const ListCommunitiesResponse = { + $type: "yandex.cloud.datasphere.v2.ListCommunitiesResponse" as const, + + encode( + message: ListCommunitiesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.communities) { + Community.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListCommunitiesResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListCommunitiesResponse, + } as ListCommunitiesResponse; + message.communities = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communities.push(Community.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListCommunitiesResponse { + const message = { + ...baseListCommunitiesResponse, + } as ListCommunitiesResponse; + message.communities = (object.communities ?? []).map((e: any) => + Community.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListCommunitiesResponse): unknown { + const obj: any = {}; + if (message.communities) { + obj.communities = message.communities.map((e) => + e ? Community.toJSON(e) : undefined + ); + } else { + obj.communities = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListCommunitiesResponse { + const message = { + ...baseListCommunitiesResponse, + } as ListCommunitiesResponse; + message.communities = + object.communities?.map((e) => Community.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListCommunitiesResponse.$type, ListCommunitiesResponse); + +const baseSetCommunityAccessBindingsMetadata: object = { + $type: "yandex.cloud.datasphere.v2.SetCommunityAccessBindingsMetadata", + communityId: "", +}; + +export const SetCommunityAccessBindingsMetadata = { + $type: + "yandex.cloud.datasphere.v2.SetCommunityAccessBindingsMetadata" as const, + + encode( + message: SetCommunityAccessBindingsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetCommunityAccessBindingsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSetCommunityAccessBindingsMetadata, + } as SetCommunityAccessBindingsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetCommunityAccessBindingsMetadata { + const message = { + ...baseSetCommunityAccessBindingsMetadata, + } as SetCommunityAccessBindingsMetadata; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + return message; + }, + + toJSON(message: SetCommunityAccessBindingsMetadata): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): SetCommunityAccessBindingsMetadata { + const message = { + ...baseSetCommunityAccessBindingsMetadata, + } as SetCommunityAccessBindingsMetadata; + message.communityId = object.communityId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + SetCommunityAccessBindingsMetadata.$type, + SetCommunityAccessBindingsMetadata +); + +const baseUpdateCommunityAccessBindingsMetadata: object = { + $type: "yandex.cloud.datasphere.v2.UpdateCommunityAccessBindingsMetadata", + communityId: "", +}; + +export const UpdateCommunityAccessBindingsMetadata = { + $type: + "yandex.cloud.datasphere.v2.UpdateCommunityAccessBindingsMetadata" as const, + + encode( + message: UpdateCommunityAccessBindingsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateCommunityAccessBindingsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateCommunityAccessBindingsMetadata, + } as UpdateCommunityAccessBindingsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateCommunityAccessBindingsMetadata { + const message = { + ...baseUpdateCommunityAccessBindingsMetadata, + } as UpdateCommunityAccessBindingsMetadata; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? 
String(object.communityId) + : ""; + return message; + }, + + toJSON(message: UpdateCommunityAccessBindingsMetadata): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateCommunityAccessBindingsMetadata { + const message = { + ...baseUpdateCommunityAccessBindingsMetadata, + } as UpdateCommunityAccessBindingsMetadata; + message.communityId = object.communityId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateCommunityAccessBindingsMetadata.$type, + UpdateCommunityAccessBindingsMetadata +); + +export const CommunityServiceService = { + /** Creates community in specified organization. */ + create: { + path: "/yandex.cloud.datasphere.v2.CommunityService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateCommunityRequest) => + Buffer.from(CreateCommunityRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateCommunityRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns community. */ + get: { + path: "/yandex.cloud.datasphere.v2.CommunityService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetCommunityRequest) => + Buffer.from(GetCommunityRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetCommunityRequest.decode(value), + responseSerialize: (value: Community) => + Buffer.from(Community.encode(value).finish()), + responseDeserialize: (value: Buffer) => Community.decode(value), + }, + /** Updates specified community. 
*/ + update: { + path: "/yandex.cloud.datasphere.v2.CommunityService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateCommunityRequest) => + Buffer.from(UpdateCommunityRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateCommunityRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes specified community. */ + delete: { + path: "/yandex.cloud.datasphere.v2.CommunityService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteCommunityRequest) => + Buffer.from(DeleteCommunityRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteCommunityRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** List communities in specified organization. */ + list: { + path: "/yandex.cloud.datasphere.v2.CommunityService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListCommunitiesRequest) => + Buffer.from(ListCommunitiesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListCommunitiesRequest.decode(value), + responseSerialize: (value: ListCommunitiesResponse) => + Buffer.from(ListCommunitiesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListCommunitiesResponse.decode(value), + }, + /** Lists access bindings for specified community. 
*/ + listAccessBindings: { + path: "/yandex.cloud.datasphere.v2.CommunityService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for specified community. */ + setAccessBindings: { + path: "/yandex.cloud.datasphere.v2.CommunityService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for specified community. */ + updateAccessBindings: { + path: "/yandex.cloud.datasphere.v2.CommunityService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface CommunityServiceServer extends UntypedServiceImplementation { + /** Creates community in specified organization. */ + create: handleUnaryCall; + /** Returns community. */ + get: handleUnaryCall; + /** Updates specified community. 
*/ + update: handleUnaryCall; + /** Deletes specified community. */ + delete: handleUnaryCall; + /** List communities in specified organization. */ + list: handleUnaryCall; + /** Lists access bindings for specified community. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for specified community. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for specified community. */ + updateAccessBindings: handleUnaryCall; +} + +export interface CommunityServiceClient extends Client { + /** Creates community in specified organization. */ + create( + request: CreateCommunityRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateCommunityRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateCommunityRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns community. */ + get( + request: GetCommunityRequest, + callback: (error: ServiceError | null, response: Community) => void + ): ClientUnaryCall; + get( + request: GetCommunityRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Community) => void + ): ClientUnaryCall; + get( + request: GetCommunityRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Community) => void + ): ClientUnaryCall; + /** Updates specified community. 
*/ + update( + request: UpdateCommunityRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateCommunityRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateCommunityRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes specified community. */ + delete( + request: DeleteCommunityRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteCommunityRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteCommunityRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** List communities in specified organization. */ + list( + request: ListCommunitiesRequest, + callback: ( + error: ServiceError | null, + response: ListCommunitiesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListCommunitiesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListCommunitiesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListCommunitiesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListCommunitiesResponse + ) => void + ): ClientUnaryCall; + /** Lists access bindings for specified community. 
*/ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for specified community. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for specified community. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const CommunityServiceClient = makeGenericClientConstructor( + CommunityServiceService, + "yandex.cloud.datasphere.v2.CommunityService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): CommunityServiceClient; + service: typeof CommunityServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/dataset.ts b/src/generated/yandex/cloud/datasphere/v2/dataset.ts new file mode 100644 index 00000000..c32a3ede --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/dataset.ts @@ -0,0 +1,762 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export interface Dataset { + $type: "yandex.cloud.datasphere.v2.Dataset"; + /** ID of the dataset. */ + id: string; + /** ID of the project. */ + projectId: string; + /** Time the dataset was created. */ + createdAt?: Date; + /** Name of the dataset. */ + name: string; + /** Description of the dataset. */ + description: string; + /** Labels of the dataset. */ + labels: { [key: string]: string }; + /** ID of the user who created the dataset. */ + createdById: string; + /** Code used to create dataset. */ + code: string; + /** Size of the dataset, Gb. */ + sizeGb: number; + /** Zone IDs where dataset is available. */ + zoneIds: string[]; + /** Dataset mount path. */ + mountPath: string; + /** ID of the data capsule object, storing information about dataset storage. */ + dataCapsuleId: string; +} + +export interface Dataset_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.Dataset.LabelsEntry"; + key: string; + value: string; +} + +export interface DatasetStatus { + $type: "yandex.cloud.datasphere.v2.DatasetStatus"; + /** Dataset is activated. 
*/ + statusActive?: DatasetStatus_StatusActive | undefined; + /** Dataset is inactive. */ + statusInactive?: DatasetStatus_StatusInactive | undefined; + /** Error while activating dataset. */ + statusError?: DatasetStatus_StatusError | undefined; +} + +export interface DatasetStatus_StatusActive { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusActive"; +} + +export interface DatasetStatus_StatusInactive { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusInactive"; +} + +export interface DatasetStatus_StatusError { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusError"; + /** Text of the error. */ + error: string; +} + +const baseDataset: object = { + $type: "yandex.cloud.datasphere.v2.Dataset", + id: "", + projectId: "", + name: "", + description: "", + createdById: "", + code: "", + sizeGb: 0, + zoneIds: "", + mountPath: "", + dataCapsuleId: "", +}; + +export const Dataset = { + $type: "yandex.cloud.datasphere.v2.Dataset" as const, + + encode( + message: Dataset, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.projectId !== "") { + writer.uint32(18).string(message.projectId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Dataset_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.Dataset.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.createdById !== "") { + writer.uint32(58).string(message.createdById); + } + if (message.code !== "") { + writer.uint32(66).string(message.code); + } + if (message.sizeGb !== 0) { + writer.uint32(72).int64(message.sizeGb); + 
} + for (const v of message.zoneIds) { + writer.uint32(82).string(v!); + } + if (message.mountPath !== "") { + writer.uint32(90).string(message.mountPath); + } + if (message.dataCapsuleId !== "") { + writer.uint32(98).string(message.dataCapsuleId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Dataset { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDataset } as Dataset; + message.labels = {}; + message.zoneIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.projectId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = Dataset_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.createdById = reader.string(); + break; + case 8: + message.code = reader.string(); + break; + case 9: + message.sizeGb = longToNumber(reader.int64() as Long); + break; + case 10: + message.zoneIds.push(reader.string()); + break; + case 11: + message.mountPath = reader.string(); + break; + case 12: + message.dataCapsuleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dataset { + const message = { ...baseDataset } as Dataset; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.createdById = + object.createdById !== undefined && object.createdById !== null + ? String(object.createdById) + : ""; + message.code = + object.code !== undefined && object.code !== null + ? String(object.code) + : ""; + message.sizeGb = + object.sizeGb !== undefined && object.sizeGb !== null + ? Number(object.sizeGb) + : 0; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.mountPath = + object.mountPath !== undefined && object.mountPath !== null + ? String(object.mountPath) + : ""; + message.dataCapsuleId = + object.dataCapsuleId !== undefined && object.dataCapsuleId !== null + ? 
String(object.dataCapsuleId) + : ""; + return message; + }, + + toJSON(message: Dataset): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.projectId !== undefined && (obj.projectId = message.projectId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.createdById !== undefined && + (obj.createdById = message.createdById); + message.code !== undefined && (obj.code = message.code); + message.sizeGb !== undefined && (obj.sizeGb = Math.round(message.sizeGb)); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + message.mountPath !== undefined && (obj.mountPath = message.mountPath); + message.dataCapsuleId !== undefined && + (obj.dataCapsuleId = message.dataCapsuleId); + return obj; + }, + + fromPartial, I>>(object: I): Dataset { + const message = { ...baseDataset } as Dataset; + message.id = object.id ?? ""; + message.projectId = object.projectId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.createdById = object.createdById ?? ""; + message.code = object.code ?? ""; + message.sizeGb = object.sizeGb ?? 0; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.mountPath = object.mountPath ?? ""; + message.dataCapsuleId = object.dataCapsuleId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Dataset.$type, Dataset); + +const baseDataset_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.Dataset.LabelsEntry", + key: "", + value: "", +}; + +export const Dataset_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.Dataset.LabelsEntry" as const, + + encode( + message: Dataset_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Dataset_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDataset_LabelsEntry } as Dataset_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dataset_LabelsEntry { + const message = { ...baseDataset_LabelsEntry } as Dataset_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Dataset_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Dataset_LabelsEntry { + const message = { ...baseDataset_LabelsEntry } as Dataset_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Dataset_LabelsEntry.$type, Dataset_LabelsEntry); + +const baseDatasetStatus: object = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus", +}; + +export const DatasetStatus = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus" as const, + + encode( + message: DatasetStatus, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.statusActive !== undefined) { + DatasetStatus_StatusActive.encode( + message.statusActive, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.statusInactive !== undefined) { + DatasetStatus_StatusInactive.encode( + message.statusInactive, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.statusError !== undefined) { + DatasetStatus_StatusError.encode( + message.statusError, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DatasetStatus { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDatasetStatus } as DatasetStatus; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.statusActive = DatasetStatus_StatusActive.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.statusInactive = DatasetStatus_StatusInactive.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.statusError = DatasetStatus_StatusError.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DatasetStatus { + const message = { ...baseDatasetStatus } as DatasetStatus; + message.statusActive = + object.statusActive !== undefined && object.statusActive !== null + ? 
DatasetStatus_StatusActive.fromJSON(object.statusActive) + : undefined; + message.statusInactive = + object.statusInactive !== undefined && object.statusInactive !== null + ? DatasetStatus_StatusInactive.fromJSON(object.statusInactive) + : undefined; + message.statusError = + object.statusError !== undefined && object.statusError !== null + ? DatasetStatus_StatusError.fromJSON(object.statusError) + : undefined; + return message; + }, + + toJSON(message: DatasetStatus): unknown { + const obj: any = {}; + message.statusActive !== undefined && + (obj.statusActive = message.statusActive + ? DatasetStatus_StatusActive.toJSON(message.statusActive) + : undefined); + message.statusInactive !== undefined && + (obj.statusInactive = message.statusInactive + ? DatasetStatus_StatusInactive.toJSON(message.statusInactive) + : undefined); + message.statusError !== undefined && + (obj.statusError = message.statusError + ? DatasetStatus_StatusError.toJSON(message.statusError) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): DatasetStatus { + const message = { ...baseDatasetStatus } as DatasetStatus; + message.statusActive = + object.statusActive !== undefined && object.statusActive !== null + ? DatasetStatus_StatusActive.fromPartial(object.statusActive) + : undefined; + message.statusInactive = + object.statusInactive !== undefined && object.statusInactive !== null + ? DatasetStatus_StatusInactive.fromPartial(object.statusInactive) + : undefined; + message.statusError = + object.statusError !== undefined && object.statusError !== null + ? 
DatasetStatus_StatusError.fromPartial(object.statusError) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(DatasetStatus.$type, DatasetStatus); + +const baseDatasetStatus_StatusActive: object = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusActive", +}; + +export const DatasetStatus_StatusActive = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusActive" as const, + + encode( + _: DatasetStatus_StatusActive, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DatasetStatus_StatusActive { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDatasetStatus_StatusActive, + } as DatasetStatus_StatusActive; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): DatasetStatus_StatusActive { + const message = { + ...baseDatasetStatus_StatusActive, + } as DatasetStatus_StatusActive; + return message; + }, + + toJSON(_: DatasetStatus_StatusActive): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): DatasetStatus_StatusActive { + const message = { + ...baseDatasetStatus_StatusActive, + } as DatasetStatus_StatusActive; + return message; + }, +}; + +messageTypeRegistry.set( + DatasetStatus_StatusActive.$type, + DatasetStatus_StatusActive +); + +const baseDatasetStatus_StatusInactive: object = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusInactive", +}; + +export const DatasetStatus_StatusInactive = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusInactive" as const, + + encode( + _: DatasetStatus_StatusInactive, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | 
Uint8Array, + length?: number + ): DatasetStatus_StatusInactive { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDatasetStatus_StatusInactive, + } as DatasetStatus_StatusInactive; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): DatasetStatus_StatusInactive { + const message = { + ...baseDatasetStatus_StatusInactive, + } as DatasetStatus_StatusInactive; + return message; + }, + + toJSON(_: DatasetStatus_StatusInactive): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): DatasetStatus_StatusInactive { + const message = { + ...baseDatasetStatus_StatusInactive, + } as DatasetStatus_StatusInactive; + return message; + }, +}; + +messageTypeRegistry.set( + DatasetStatus_StatusInactive.$type, + DatasetStatus_StatusInactive +); + +const baseDatasetStatus_StatusError: object = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusError", + error: "", +}; + +export const DatasetStatus_StatusError = { + $type: "yandex.cloud.datasphere.v2.DatasetStatus.StatusError" as const, + + encode( + message: DatasetStatus_StatusError, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.error !== "") { + writer.uint32(10).string(message.error); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DatasetStatus_StatusError { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDatasetStatus_StatusError, + } as DatasetStatus_StatusError; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.error = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DatasetStatus_StatusError { + const message = { + ...baseDatasetStatus_StatusError, + } as DatasetStatus_StatusError; + message.error = + object.error !== undefined && object.error !== null + ? String(object.error) + : ""; + return message; + }, + + toJSON(message: DatasetStatus_StatusError): unknown { + const obj: any = {}; + message.error !== undefined && (obj.error = message.error); + return obj; + }, + + fromPartial, I>>( + object: I + ): DatasetStatus_StatusError { + const message = { + ...baseDatasetStatus_StatusError, + } as DatasetStatus_StatusError; + message.error = object.error ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DatasetStatus_StatusError.$type, + DatasetStatus_StatusError +); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/project.ts b/src/generated/yandex/cloud/datasphere/v2/project.ts new file mode 100644 index 00000000..23ac65dd --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/project.ts @@ -0,0 +1,814 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Int64Value } from "../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +/** A Project resource. */ +export interface Project { + $type: "yandex.cloud.datasphere.v2.Project"; + /** ID of the project. */ + id: string; + createdAt?: Date; + /** Name of the project. 1-63 characters long. */ + name: string; + /** Description of the project. 0-256 characters long. */ + description: string; + labels: { [key: string]: string }; + createdById: string; + /** Settings of the project. 
*/ + settings?: Project_Settings; + /** Limits of the project. */ + limits?: Project_Limits; + /** ID of the community that the project belongs to. */ + communityId: string; +} + +export interface Project_Settings { + $type: "yandex.cloud.datasphere.v2.Project.Settings"; + /** ID of the service account, on whose behalf all operations with clusters will be performed. */ + serviceAccountId: string; + /** + * ID of the subnet where the DataProc cluster resides. + * Currently only subnets created in the availability zone ru-central1-a are supported. + */ + subnetId: string; + /** ID of the DataProc cluster. */ + dataProcClusterId: string; + /** Commit mode that is assigned to the project. */ + commitMode: Project_Settings_CommitMode; + /** Network interfaces security groups. */ + securityGroupIds: string[]; + /** Is early access preview enabled for the project. */ + earlyAccess: boolean; + /** Project IDE. */ + ide: Project_Settings_Ide; + /** Default project folder ID. */ + defaultFolderId: string; + /** Timeout to automatically stop stale executions. */ + staleExecTimeoutMode: Project_Settings_StaleExecutionTimeoutMode; +} + +export enum Project_Settings_CommitMode { + COMMIT_MODE_UNSPECIFIED = 0, + /** STANDARD - Commit happens after the execution of a cell or group of cells or after completion with an error. */ + STANDARD = 1, + /** + * AUTO - Commit happens periodically. + * Also, automatic saving of state occurs when switching to another type of computing resource. 
+ */ + AUTO = 2, + UNRECOGNIZED = -1, +} + +export function project_Settings_CommitModeFromJSON( + object: any +): Project_Settings_CommitMode { + switch (object) { + case 0: + case "COMMIT_MODE_UNSPECIFIED": + return Project_Settings_CommitMode.COMMIT_MODE_UNSPECIFIED; + case 1: + case "STANDARD": + return Project_Settings_CommitMode.STANDARD; + case 2: + case "AUTO": + return Project_Settings_CommitMode.AUTO; + case -1: + case "UNRECOGNIZED": + default: + return Project_Settings_CommitMode.UNRECOGNIZED; + } +} + +export function project_Settings_CommitModeToJSON( + object: Project_Settings_CommitMode +): string { + switch (object) { + case Project_Settings_CommitMode.COMMIT_MODE_UNSPECIFIED: + return "COMMIT_MODE_UNSPECIFIED"; + case Project_Settings_CommitMode.STANDARD: + return "STANDARD"; + case Project_Settings_CommitMode.AUTO: + return "AUTO"; + default: + return "UNKNOWN"; + } +} + +export enum Project_Settings_Ide { + IDE_UNSPECIFIED = 0, + /** JUPYTER_LAB - Project running on JupyterLab IDE. */ + JUPYTER_LAB = 1, + UNRECOGNIZED = -1, +} + +export function project_Settings_IdeFromJSON( + object: any +): Project_Settings_Ide { + switch (object) { + case 0: + case "IDE_UNSPECIFIED": + return Project_Settings_Ide.IDE_UNSPECIFIED; + case 1: + case "JUPYTER_LAB": + return Project_Settings_Ide.JUPYTER_LAB; + case -1: + case "UNRECOGNIZED": + default: + return Project_Settings_Ide.UNRECOGNIZED; + } +} + +export function project_Settings_IdeToJSON( + object: Project_Settings_Ide +): string { + switch (object) { + case Project_Settings_Ide.IDE_UNSPECIFIED: + return "IDE_UNSPECIFIED"; + case Project_Settings_Ide.JUPYTER_LAB: + return "JUPYTER_LAB"; + default: + return "UNKNOWN"; + } +} + +export enum Project_Settings_StaleExecutionTimeoutMode { + STALE_EXECUTION_TIMEOUT_MODE_UNSPECIFIED = 0, + /** ONE_HOUR - Setting to automatically stop stale execution after one hour with low consumption. 
*/ + ONE_HOUR = 1, + /** THREE_HOURS - Setting to automatically stop stale execution after three hours with low consumption. */ + THREE_HOURS = 2, + /** NO_TIMEOUT - Setting to never automatically stop stale executions. */ + NO_TIMEOUT = 3, + UNRECOGNIZED = -1, +} + +export function project_Settings_StaleExecutionTimeoutModeFromJSON( + object: any +): Project_Settings_StaleExecutionTimeoutMode { + switch (object) { + case 0: + case "STALE_EXECUTION_TIMEOUT_MODE_UNSPECIFIED": + return Project_Settings_StaleExecutionTimeoutMode.STALE_EXECUTION_TIMEOUT_MODE_UNSPECIFIED; + case 1: + case "ONE_HOUR": + return Project_Settings_StaleExecutionTimeoutMode.ONE_HOUR; + case 2: + case "THREE_HOURS": + return Project_Settings_StaleExecutionTimeoutMode.THREE_HOURS; + case 3: + case "NO_TIMEOUT": + return Project_Settings_StaleExecutionTimeoutMode.NO_TIMEOUT; + case -1: + case "UNRECOGNIZED": + default: + return Project_Settings_StaleExecutionTimeoutMode.UNRECOGNIZED; + } +} + +export function project_Settings_StaleExecutionTimeoutModeToJSON( + object: Project_Settings_StaleExecutionTimeoutMode +): string { + switch (object) { + case Project_Settings_StaleExecutionTimeoutMode.STALE_EXECUTION_TIMEOUT_MODE_UNSPECIFIED: + return "STALE_EXECUTION_TIMEOUT_MODE_UNSPECIFIED"; + case Project_Settings_StaleExecutionTimeoutMode.ONE_HOUR: + return "ONE_HOUR"; + case Project_Settings_StaleExecutionTimeoutMode.THREE_HOURS: + return "THREE_HOURS"; + case Project_Settings_StaleExecutionTimeoutMode.NO_TIMEOUT: + return "NO_TIMEOUT"; + default: + return "UNKNOWN"; + } +} + +export interface Project_Limits { + $type: "yandex.cloud.datasphere.v2.Project.Limits"; + /** The number of units that can be spent per hour. */ + maxUnitsPerHour?: number; + /** The number of units that can be spent on the one execution. 
*/ + maxUnitsPerExecution?: number; +} + +export interface Project_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.Project.LabelsEntry"; + key: string; + value: string; +} + +const baseProject: object = { + $type: "yandex.cloud.datasphere.v2.Project", + id: "", + name: "", + description: "", + createdById: "", + communityId: "", +}; + +export const Project = { + $type: "yandex.cloud.datasphere.v2.Project" as const, + + encode( + message: Project, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Project_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.Project.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.createdById !== "") { + writer.uint32(50).string(message.createdById); + } + if (message.settings !== undefined) { + Project_Settings.encode( + message.settings, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.limits !== undefined) { + Project_Limits.encode(message.limits, writer.uint32(66).fork()).ldelim(); + } + if (message.communityId !== "") { + writer.uint32(90).string(message.communityId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Project { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseProject } as Project; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = Project_LabelsEntry.decode(reader, reader.uint32()); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.createdById = reader.string(); + break; + case 7: + message.settings = Project_Settings.decode(reader, reader.uint32()); + break; + case 8: + message.limits = Project_Limits.decode(reader, reader.uint32()); + break; + case 11: + message.communityId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Project { + const message = { ...baseProject } as Project; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.createdById = + object.createdById !== undefined && object.createdById !== null + ? String(object.createdById) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
Project_Settings.fromJSON(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? Project_Limits.fromJSON(object.limits) + : undefined; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + return message; + }, + + toJSON(message: Project): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.createdById !== undefined && + (obj.createdById = message.createdById); + message.settings !== undefined && + (obj.settings = message.settings + ? Project_Settings.toJSON(message.settings) + : undefined); + message.limits !== undefined && + (obj.limits = message.limits + ? Project_Limits.toJSON(message.limits) + : undefined); + message.communityId !== undefined && + (obj.communityId = message.communityId); + return obj; + }, + + fromPartial, I>>(object: I): Project { + const message = { ...baseProject } as Project; + message.id = object.id ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.createdById = object.createdById ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? Project_Settings.fromPartial(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? 
Project_Limits.fromPartial(object.limits) + : undefined; + message.communityId = object.communityId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Project.$type, Project); + +const baseProject_Settings: object = { + $type: "yandex.cloud.datasphere.v2.Project.Settings", + serviceAccountId: "", + subnetId: "", + dataProcClusterId: "", + commitMode: 0, + securityGroupIds: "", + earlyAccess: false, + ide: 0, + defaultFolderId: "", + staleExecTimeoutMode: 0, +}; + +export const Project_Settings = { + $type: "yandex.cloud.datasphere.v2.Project.Settings" as const, + + encode( + message: Project_Settings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serviceAccountId !== "") { + writer.uint32(10).string(message.serviceAccountId); + } + if (message.subnetId !== "") { + writer.uint32(18).string(message.subnetId); + } + if (message.dataProcClusterId !== "") { + writer.uint32(26).string(message.dataProcClusterId); + } + if (message.commitMode !== 0) { + writer.uint32(32).int32(message.commitMode); + } + for (const v of message.securityGroupIds) { + writer.uint32(42).string(v!); + } + if (message.earlyAccess === true) { + writer.uint32(48).bool(message.earlyAccess); + } + if (message.ide !== 0) { + writer.uint32(56).int32(message.ide); + } + if (message.defaultFolderId !== "") { + writer.uint32(66).string(message.defaultFolderId); + } + if (message.staleExecTimeoutMode !== 0) { + writer.uint32(72).int32(message.staleExecTimeoutMode); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Project_Settings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseProject_Settings } as Project_Settings; + message.securityGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serviceAccountId = reader.string(); + break; + case 2: + message.subnetId = reader.string(); + break; + case 3: + message.dataProcClusterId = reader.string(); + break; + case 4: + message.commitMode = reader.int32() as any; + break; + case 5: + message.securityGroupIds.push(reader.string()); + break; + case 6: + message.earlyAccess = reader.bool(); + break; + case 7: + message.ide = reader.int32() as any; + break; + case 8: + message.defaultFolderId = reader.string(); + break; + case 9: + message.staleExecTimeoutMode = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Project_Settings { + const message = { ...baseProject_Settings } as Project_Settings; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.dataProcClusterId = + object.dataProcClusterId !== undefined && + object.dataProcClusterId !== null + ? String(object.dataProcClusterId) + : ""; + message.commitMode = + object.commitMode !== undefined && object.commitMode !== null + ? project_Settings_CommitModeFromJSON(object.commitMode) + : 0; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.earlyAccess = + object.earlyAccess !== undefined && object.earlyAccess !== null + ? Boolean(object.earlyAccess) + : false; + message.ide = + object.ide !== undefined && object.ide !== null + ? 
project_Settings_IdeFromJSON(object.ide) + : 0; + message.defaultFolderId = + object.defaultFolderId !== undefined && object.defaultFolderId !== null + ? String(object.defaultFolderId) + : ""; + message.staleExecTimeoutMode = + object.staleExecTimeoutMode !== undefined && + object.staleExecTimeoutMode !== null + ? project_Settings_StaleExecutionTimeoutModeFromJSON( + object.staleExecTimeoutMode + ) + : 0; + return message; + }, + + toJSON(message: Project_Settings): unknown { + const obj: any = {}; + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + message.dataProcClusterId !== undefined && + (obj.dataProcClusterId = message.dataProcClusterId); + message.commitMode !== undefined && + (obj.commitMode = project_Settings_CommitModeToJSON(message.commitMode)); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.earlyAccess !== undefined && + (obj.earlyAccess = message.earlyAccess); + message.ide !== undefined && + (obj.ide = project_Settings_IdeToJSON(message.ide)); + message.defaultFolderId !== undefined && + (obj.defaultFolderId = message.defaultFolderId); + message.staleExecTimeoutMode !== undefined && + (obj.staleExecTimeoutMode = + project_Settings_StaleExecutionTimeoutModeToJSON( + message.staleExecTimeoutMode + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): Project_Settings { + const message = { ...baseProject_Settings } as Project_Settings; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.subnetId = object.subnetId ?? ""; + message.dataProcClusterId = object.dataProcClusterId ?? ""; + message.commitMode = object.commitMode ?? 0; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.earlyAccess = object.earlyAccess ?? false; + message.ide = object.ide ?? 
0; + message.defaultFolderId = object.defaultFolderId ?? ""; + message.staleExecTimeoutMode = object.staleExecTimeoutMode ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Project_Settings.$type, Project_Settings); + +const baseProject_Limits: object = { + $type: "yandex.cloud.datasphere.v2.Project.Limits", +}; + +export const Project_Limits = { + $type: "yandex.cloud.datasphere.v2.Project.Limits" as const, + + encode( + message: Project_Limits, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxUnitsPerHour !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxUnitsPerHour!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxUnitsPerExecution !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxUnitsPerExecution!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Project_Limits { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseProject_Limits } as Project_Limits; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxUnitsPerHour = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxUnitsPerExecution = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Project_Limits { + const message = { ...baseProject_Limits } as Project_Limits; + message.maxUnitsPerHour = + object.maxUnitsPerHour !== undefined && object.maxUnitsPerHour !== null + ? Number(object.maxUnitsPerHour) + : undefined; + message.maxUnitsPerExecution = + object.maxUnitsPerExecution !== undefined && + object.maxUnitsPerExecution !== null + ? 
Number(object.maxUnitsPerExecution) + : undefined; + return message; + }, + + toJSON(message: Project_Limits): unknown { + const obj: any = {}; + message.maxUnitsPerHour !== undefined && + (obj.maxUnitsPerHour = message.maxUnitsPerHour); + message.maxUnitsPerExecution !== undefined && + (obj.maxUnitsPerExecution = message.maxUnitsPerExecution); + return obj; + }, + + fromPartial, I>>( + object: I + ): Project_Limits { + const message = { ...baseProject_Limits } as Project_Limits; + message.maxUnitsPerHour = object.maxUnitsPerHour ?? undefined; + message.maxUnitsPerExecution = object.maxUnitsPerExecution ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Project_Limits.$type, Project_Limits); + +const baseProject_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.Project.LabelsEntry", + key: "", + value: "", +}; + +export const Project_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.Project.LabelsEntry" as const, + + encode( + message: Project_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Project_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseProject_LabelsEntry } as Project_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Project_LabelsEntry { + const message = { ...baseProject_LabelsEntry } as Project_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? 
String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Project_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Project_LabelsEntry { + const message = { ...baseProject_LabelsEntry } as Project_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Project_LabelsEntry.$type, Project_LabelsEntry); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/project_service.ts b/src/generated/yandex/cloud/datasphere/v2/project_service.ts new file mode 100644 index 00000000..6ca9f5c1 --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/project_service.ts @@ -0,0 +1,3409 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + Project_Settings, + Project_Limits, + Project, +} from "../../../../yandex/cloud/datasphere/v2/project"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; +import { Int64Value } from "../../../../google/protobuf/wrappers"; +import { Struct } from 
"../../../../google/protobuf/struct"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export enum ExecutionStatus { + EXECUTION_STATUS_UNSPECIFIED = 0, + /** OK - Execution finished successfully. */ + OK = 1, + /** ERROR - Execution ended with error. */ + ERROR = 2, + /** ABORTED - Execution was aborted. */ + ABORTED = 3, + UNRECOGNIZED = -1, +} + +export function executionStatusFromJSON(object: any): ExecutionStatus { + switch (object) { + case 0: + case "EXECUTION_STATUS_UNSPECIFIED": + return ExecutionStatus.EXECUTION_STATUS_UNSPECIFIED; + case 1: + case "OK": + return ExecutionStatus.OK; + case 2: + case "ERROR": + return ExecutionStatus.ERROR; + case 3: + case "ABORTED": + return ExecutionStatus.ABORTED; + case -1: + case "UNRECOGNIZED": + default: + return ExecutionStatus.UNRECOGNIZED; + } +} + +export function executionStatusToJSON(object: ExecutionStatus): string { + switch (object) { + case ExecutionStatus.EXECUTION_STATUS_UNSPECIFIED: + return "EXECUTION_STATUS_UNSPECIFIED"; + case ExecutionStatus.OK: + return "OK"; + case ExecutionStatus.ERROR: + return "ERROR"; + case ExecutionStatus.ABORTED: + return "ABORTED"; + default: + return "UNKNOWN"; + } +} + +export interface CreateProjectRequest { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest"; + /** ID of the community to create a project in. */ + communityId: string; + /** Name of the project. 0-63 characters long. */ + name: string; + /** Description of the project. 0-256 characters long. */ + description: string; + /** Labels of the project. */ + labels: { [key: string]: string }; + /** Settings of the project. */ + settings?: Project_Settings; + /** Limits of the project. 
*/ + limits?: Project_Limits; +} + +export interface CreateProjectRequest_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateProjectMetadata { + $type: "yandex.cloud.datasphere.v2.CreateProjectMetadata"; + /** ID of the project that is being created. */ + projectId: string; +} + +export interface UpdateProjectRequest { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest"; + /** + * ID of the Project resource to update. + * To get the project ID use a [ProjectService.List] request. + */ + projectId: string; + /** Field mask that specifies which fields of the Project resource are going to be updated. */ + updateMask?: FieldMask; + /** Name of the project. 0-63 characters long. */ + name: string; + /** Description of the project. 0-256 characters long. */ + description: string; + /** Labels of the project. */ + labels: { [key: string]: string }; + /** Settings of the project. */ + settings?: Project_Settings; + /** Limits of the project. */ + limits?: Project_Limits; +} + +export interface UpdateProjectRequest_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateProjectMetadata { + $type: "yandex.cloud.datasphere.v2.UpdateProjectMetadata"; + /** ID of the project that is being updated. */ + projectId: string; +} + +export interface DeleteProjectRequest { + $type: "yandex.cloud.datasphere.v2.DeleteProjectRequest"; + /** + * ID of the Project resource to delete. + * To get the project ID use a [ProjectService.List] request. + */ + projectId: string; +} + +export interface DeleteProjectMetadata { + $type: "yandex.cloud.datasphere.v2.DeleteProjectMetadata"; + /** ID of the project that is being deleted. */ + projectId: string; +} + +export interface OpenProjectRequest { + $type: "yandex.cloud.datasphere.v2.OpenProjectRequest"; + /** + * ID of the Project resource to open. 
+ * To get the project ID use a [ProjectService.List] request. + */ + projectId: string; +} + +export interface OpenProjectMetadata { + $type: "yandex.cloud.datasphere.v2.OpenProjectMetadata"; + /** ID of the project that is being opened. */ + projectId: string; + /** Project opening status. */ + status: OpenProjectMetadata_OpenProjectStatus; +} + +export enum OpenProjectMetadata_OpenProjectStatus { + OPEN_PROJECT_STATUS_UNSPECIFIED = 0, + /** OPEN_PROJECT_STATUS_CLOSING_IDE - Closing previous IDE instance. */ + OPEN_PROJECT_STATUS_CLOSING_IDE = 1, + /** OPEN_PROJECT_STATUS_UNZIPPING_PROJECT - Unzipping project. */ + OPEN_PROJECT_STATUS_UNZIPPING_PROJECT = 2, + /** OPEN_PROJECT_STATUS_ALLOCATING_VM - Allocating VM for the project. */ + OPEN_PROJECT_STATUS_ALLOCATING_VM = 3, + /** OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES - Allocating resources for the project. */ + OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES = 4, + /** OPEN_PROJECT_STATUS_STARTING_IDE - Starting IDE. */ + OPEN_PROJECT_STATUS_STARTING_IDE = 5, + /** OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT - Applying checkpoint to project. */ + OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT = 6, + /** OPEN_PROJECT_STATUS_UNKNOWN - Unknown open project status. 
*/ + OPEN_PROJECT_STATUS_UNKNOWN = 7, + UNRECOGNIZED = -1, +} + +export function openProjectMetadata_OpenProjectStatusFromJSON( + object: any +): OpenProjectMetadata_OpenProjectStatus { + switch (object) { + case 0: + case "OPEN_PROJECT_STATUS_UNSPECIFIED": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNSPECIFIED; + case 1: + case "OPEN_PROJECT_STATUS_CLOSING_IDE": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_CLOSING_IDE; + case 2: + case "OPEN_PROJECT_STATUS_UNZIPPING_PROJECT": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNZIPPING_PROJECT; + case 3: + case "OPEN_PROJECT_STATUS_ALLOCATING_VM": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_ALLOCATING_VM; + case 4: + case "OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES; + case 5: + case "OPEN_PROJECT_STATUS_STARTING_IDE": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_STARTING_IDE; + case 6: + case "OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT; + case 7: + case "OPEN_PROJECT_STATUS_UNKNOWN": + return OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNKNOWN; + case -1: + case "UNRECOGNIZED": + default: + return OpenProjectMetadata_OpenProjectStatus.UNRECOGNIZED; + } +} + +export function openProjectMetadata_OpenProjectStatusToJSON( + object: OpenProjectMetadata_OpenProjectStatus +): string { + switch (object) { + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNSPECIFIED: + return "OPEN_PROJECT_STATUS_UNSPECIFIED"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_CLOSING_IDE: + return "OPEN_PROJECT_STATUS_CLOSING_IDE"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNZIPPING_PROJECT: + return "OPEN_PROJECT_STATUS_UNZIPPING_PROJECT"; + case 
OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_ALLOCATING_VM: + return "OPEN_PROJECT_STATUS_ALLOCATING_VM"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES: + return "OPEN_PROJECT_STATUS_ALLOCATING_RESOURCES"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_STARTING_IDE: + return "OPEN_PROJECT_STATUS_STARTING_IDE"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT: + return "OPEN_PROJECT_STATUS_APPLYING_CHECKPOINT"; + case OpenProjectMetadata_OpenProjectStatus.OPEN_PROJECT_STATUS_UNKNOWN: + return "OPEN_PROJECT_STATUS_UNKNOWN"; + default: + return "UNKNOWN"; + } +} + +export interface OpenProjectResponse { + $type: "yandex.cloud.datasphere.v2.OpenProjectResponse"; + /** + * URL of the project that is being opened. + * Make GET request to [project_url] with sessionToken query parameter equals to [session_token] + * or POST request to [project_url] with sessionToken body parameter equals to [session_token] + * to fetch Datasphere web interface. + */ + projectUrl: string; + /** Session token of the project that is being opened. */ + sessionToken: string; +} + +export interface GetProjectRequest { + $type: "yandex.cloud.datasphere.v2.GetProjectRequest"; + /** + * ID of the Project resource to return. + * To get the project ID use a [ProjectService.List] request. + */ + projectId: string; +} + +export interface ListProjectsRequest { + $type: "yandex.cloud.datasphere.v2.ListProjectsRequest"; + /** ID of the community to list projects in. */ + communityId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], + * the service returns a [ListProjectsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the + * [ListProjectsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * Name pattern to filter projects that are returned. + * Only projects with names matching the pattern will be returned. + */ + projectNamePattern: string; + /** + * User ID to filter projects that are returned. + * Only projects that are owned by specified user will be returned. + */ + ownedById: string; +} + +export interface ListProjectsResponse { + $type: "yandex.cloud.datasphere.v2.ListProjectsResponse"; + /** List of Project resources. */ + projects: Project[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListProjectsRequest.page_size], use + * the [next_page_token] as the value + * for the [ListProjectsRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface GetUnitBalanceRequest { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceRequest"; + /** ID of the project to return the unit balance for. */ + projectId: string; +} + +export interface GetUnitBalanceResponse { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceResponse"; + /** The number of units available to the project. */ + unitBalance?: number; +} + +export interface SetUnitBalanceRequest { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceRequest"; + /** ID of the project to set the unit balance for. */ + projectId: string; + /** The number of units available to the project. */ + unitBalance?: number; +} + +export interface SetUnitBalanceMetadata { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceMetadata"; + /** ID of the project which unit balance is set. 
*/ + projectId: string; +} + +export interface ProjectExecutionRequest { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionRequest"; + /** ID of the project to execute notebook/cell in. */ + projectId: string; + /** ID of the notebook to execute. */ + notebookId: string | undefined; + /** ID of the cell to execute. */ + cellId: string | undefined; + /** Values of input variables. */ + inputVariables?: { [key: string]: any }; + /** Names of output variables. */ + outputVariableNames: string[]; +} + +export interface ProjectExecutionMetadata { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionMetadata"; + /** ID of the project in which notebook is being executed. */ + projectId: string; + /** ID of the notebook that is being executed */ + notebookId: string | undefined; + /** ID of the cell that is being executed */ + cellId: string | undefined; +} + +export interface ProjectExecutionResponse { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionResponse"; + /** ID of the checkpoint resulting from the execution. */ + checkpointId: string; + /** Values of output variables resulting from the execution. */ + outputVariables?: { [key: string]: any }; + /** Execution final status. */ + executionStatus: ExecutionStatus; +} + +export interface CellOutputsRequest { + $type: "yandex.cloud.datasphere.v2.CellOutputsRequest"; + /** ID of the project to return cell outputs for. */ + projectId: string; + /** ID of the cell to return outputs for. */ + cellId: string; + /** ID of the checkpoint to return cell outputs for. */ + checkpointId: string; + /** Timestamp from which to return outputs. */ + startAt?: Date; +} + +export interface CellOutputsResponse { + $type: "yandex.cloud.datasphere.v2.CellOutputsResponse"; + /** List of outputs. */ + outputs: string[]; +} + +export interface GetStateVariablesRequest { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesRequest"; + /** ID of the project, for which to return state variables. 
*/ + projectId: string; + /** ID of the notebook, for which to return state variables. */ + notebookId: string; + /** Names of variables to return. */ + variableNames: string[]; + /** ID of the checkpoint, for which to return state variables. */ + checkpointId: string; +} + +export interface GetStateVariablesResponse { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesResponse"; + /** Values of the specified variables. */ + variables?: { [key: string]: any }; +} + +export interface SetProjectAccessBindingsMetadata { + $type: "yandex.cloud.datasphere.v2.SetProjectAccessBindingsMetadata"; + /** ID of the project which access bindings are set. */ + projectId: string; +} + +export interface UpdateProjectAccessBindingsMetadata { + $type: "yandex.cloud.datasphere.v2.UpdateProjectAccessBindingsMetadata"; + /** ID of the project which access bindings are updated. */ + projectId: string; +} + +const baseCreateProjectRequest: object = { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest", + communityId: "", + name: "", + description: "", +}; + +export const CreateProjectRequest = { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest" as const, + + encode( + message: CreateProjectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateProjectRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.settings !== undefined) { + Project_Settings.encode( + message.settings, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.limits !== undefined) { + Project_Limits.encode(message.limits, 
writer.uint32(50).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateProjectRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateProjectRequest } as CreateProjectRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateProjectRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.settings = Project_Settings.decode(reader, reader.uint32()); + break; + case 6: + message.limits = Project_Limits.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateProjectRequest { + const message = { ...baseCreateProjectRequest } as CreateProjectRequest; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.settings = + object.settings !== undefined && object.settings !== null + ? Project_Settings.fromJSON(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? 
Project_Limits.fromJSON(object.limits) + : undefined; + return message; + }, + + toJSON(message: CreateProjectRequest): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.settings !== undefined && + (obj.settings = message.settings + ? Project_Settings.toJSON(message.settings) + : undefined); + message.limits !== undefined && + (obj.limits = message.limits + ? Project_Limits.toJSON(message.limits) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateProjectRequest { + const message = { ...baseCreateProjectRequest } as CreateProjectRequest; + message.communityId = object.communityId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.settings = + object.settings !== undefined && object.settings !== null + ? Project_Settings.fromPartial(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? 
Project_Limits.fromPartial(object.limits) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateProjectRequest.$type, CreateProjectRequest); + +const baseCreateProjectRequest_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateProjectRequest_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.CreateProjectRequest.LabelsEntry" as const, + + encode( + message: CreateProjectRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateProjectRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateProjectRequest_LabelsEntry, + } as CreateProjectRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateProjectRequest_LabelsEntry { + const message = { + ...baseCreateProjectRequest_LabelsEntry, + } as CreateProjectRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateProjectRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateProjectRequest_LabelsEntry { + const message = { + ...baseCreateProjectRequest_LabelsEntry, + } as CreateProjectRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateProjectRequest_LabelsEntry.$type, + CreateProjectRequest_LabelsEntry +); + +const baseCreateProjectMetadata: object = { + $type: "yandex.cloud.datasphere.v2.CreateProjectMetadata", + projectId: "", +}; + +export const CreateProjectMetadata = { + $type: "yandex.cloud.datasphere.v2.CreateProjectMetadata" as const, + + encode( + message: CreateProjectMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateProjectMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateProjectMetadata } as CreateProjectMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateProjectMetadata { + const message = { ...baseCreateProjectMetadata } as CreateProjectMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: CreateProjectMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateProjectMetadata { + const message = { ...baseCreateProjectMetadata } as CreateProjectMetadata; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateProjectMetadata.$type, CreateProjectMetadata); + +const baseUpdateProjectRequest: object = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest", + projectId: "", + name: "", + description: "", +}; + +export const UpdateProjectRequest = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest" as const, + + encode( + message: UpdateProjectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateProjectRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.settings !== undefined) { + Project_Settings.encode( + message.settings, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.limits !== undefined) { + Project_Limits.encode(message.limits, writer.uint32(58).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateProjectRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateProjectRequest } as UpdateProjectRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateProjectRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.settings = Project_Settings.decode(reader, reader.uint32()); + break; + case 7: + message.limits = Project_Limits.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateProjectRequest { + const message = { ...baseUpdateProjectRequest } as UpdateProjectRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.settings = + object.settings !== undefined && object.settings !== null + ? Project_Settings.fromJSON(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? 
Project_Limits.fromJSON(object.limits) + : undefined; + return message; + }, + + toJSON(message: UpdateProjectRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.settings !== undefined && + (obj.settings = message.settings + ? Project_Settings.toJSON(message.settings) + : undefined); + message.limits !== undefined && + (obj.limits = message.limits + ? Project_Limits.toJSON(message.limits) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateProjectRequest { + const message = { ...baseUpdateProjectRequest } as UpdateProjectRequest; + message.projectId = object.projectId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.settings = + object.settings !== undefined && object.settings !== null + ? Project_Settings.fromPartial(object.settings) + : undefined; + message.limits = + object.limits !== undefined && object.limits !== null + ? 
Project_Limits.fromPartial(object.limits) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateProjectRequest.$type, UpdateProjectRequest); + +const baseUpdateProjectRequest_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateProjectRequest_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectRequest.LabelsEntry" as const, + + encode( + message: UpdateProjectRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateProjectRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateProjectRequest_LabelsEntry, + } as UpdateProjectRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateProjectRequest_LabelsEntry { + const message = { + ...baseUpdateProjectRequest_LabelsEntry, + } as UpdateProjectRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateProjectRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateProjectRequest_LabelsEntry { + const message = { + ...baseUpdateProjectRequest_LabelsEntry, + } as UpdateProjectRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateProjectRequest_LabelsEntry.$type, + UpdateProjectRequest_LabelsEntry +); + +const baseUpdateProjectMetadata: object = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectMetadata", + projectId: "", +}; + +export const UpdateProjectMetadata = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectMetadata" as const, + + encode( + message: UpdateProjectMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateProjectMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateProjectMetadata } as UpdateProjectMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateProjectMetadata { + const message = { ...baseUpdateProjectMetadata } as UpdateProjectMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: UpdateProjectMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateProjectMetadata { + const message = { ...baseUpdateProjectMetadata } as UpdateProjectMetadata; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateProjectMetadata.$type, UpdateProjectMetadata); + +const baseDeleteProjectRequest: object = { + $type: "yandex.cloud.datasphere.v2.DeleteProjectRequest", + projectId: "", +}; + +export const DeleteProjectRequest = { + $type: "yandex.cloud.datasphere.v2.DeleteProjectRequest" as const, + + encode( + message: DeleteProjectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteProjectRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteProjectRequest } as DeleteProjectRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteProjectRequest { + const message = { ...baseDeleteProjectRequest } as DeleteProjectRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: DeleteProjectRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteProjectRequest { + const message = { ...baseDeleteProjectRequest } as DeleteProjectRequest; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteProjectRequest.$type, DeleteProjectRequest); + +const baseDeleteProjectMetadata: object = { + $type: "yandex.cloud.datasphere.v2.DeleteProjectMetadata", + projectId: "", +}; + +export const DeleteProjectMetadata = { + $type: "yandex.cloud.datasphere.v2.DeleteProjectMetadata" as const, + + encode( + message: DeleteProjectMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteProjectMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteProjectMetadata } as DeleteProjectMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteProjectMetadata { + const message = { ...baseDeleteProjectMetadata } as DeleteProjectMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: DeleteProjectMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteProjectMetadata { + const message = { ...baseDeleteProjectMetadata } as DeleteProjectMetadata; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteProjectMetadata.$type, DeleteProjectMetadata); + +const baseOpenProjectRequest: object = { + $type: "yandex.cloud.datasphere.v2.OpenProjectRequest", + projectId: "", +}; + +export const OpenProjectRequest = { + $type: "yandex.cloud.datasphere.v2.OpenProjectRequest" as const, + + encode( + message: OpenProjectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OpenProjectRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenProjectRequest } as OpenProjectRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenProjectRequest { + const message = { ...baseOpenProjectRequest } as OpenProjectRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: OpenProjectRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenProjectRequest { + const message = { ...baseOpenProjectRequest } as OpenProjectRequest; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(OpenProjectRequest.$type, OpenProjectRequest); + +const baseOpenProjectMetadata: object = { + $type: "yandex.cloud.datasphere.v2.OpenProjectMetadata", + projectId: "", + status: 0, +}; + +export const OpenProjectMetadata = { + $type: "yandex.cloud.datasphere.v2.OpenProjectMetadata" as const, + + encode( + message: OpenProjectMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.status !== 0) { + writer.uint32(16).int32(message.status); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OpenProjectMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenProjectMetadata } as OpenProjectMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.status = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenProjectMetadata { + const message = { ...baseOpenProjectMetadata } as OpenProjectMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? 
openProjectMetadata_OpenProjectStatusFromJSON(object.status) + : 0; + return message; + }, + + toJSON(message: OpenProjectMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.status !== undefined && + (obj.status = openProjectMetadata_OpenProjectStatusToJSON( + message.status + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenProjectMetadata { + const message = { ...baseOpenProjectMetadata } as OpenProjectMetadata; + message.projectId = object.projectId ?? ""; + message.status = object.status ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(OpenProjectMetadata.$type, OpenProjectMetadata); + +const baseOpenProjectResponse: object = { + $type: "yandex.cloud.datasphere.v2.OpenProjectResponse", + projectUrl: "", + sessionToken: "", +}; + +export const OpenProjectResponse = { + $type: "yandex.cloud.datasphere.v2.OpenProjectResponse" as const, + + encode( + message: OpenProjectResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectUrl !== "") { + writer.uint32(10).string(message.projectUrl); + } + if (message.sessionToken !== "") { + writer.uint32(18).string(message.sessionToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OpenProjectResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseOpenProjectResponse } as OpenProjectResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectUrl = reader.string(); + break; + case 2: + message.sessionToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenProjectResponse { + const message = { ...baseOpenProjectResponse } as OpenProjectResponse; + message.projectUrl = + object.projectUrl !== undefined && object.projectUrl !== null + ? String(object.projectUrl) + : ""; + message.sessionToken = + object.sessionToken !== undefined && object.sessionToken !== null + ? String(object.sessionToken) + : ""; + return message; + }, + + toJSON(message: OpenProjectResponse): unknown { + const obj: any = {}; + message.projectUrl !== undefined && (obj.projectUrl = message.projectUrl); + message.sessionToken !== undefined && + (obj.sessionToken = message.sessionToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenProjectResponse { + const message = { ...baseOpenProjectResponse } as OpenProjectResponse; + message.projectUrl = object.projectUrl ?? ""; + message.sessionToken = object.sessionToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(OpenProjectResponse.$type, OpenProjectResponse); + +const baseGetProjectRequest: object = { + $type: "yandex.cloud.datasphere.v2.GetProjectRequest", + projectId: "", +}; + +export const GetProjectRequest = { + $type: "yandex.cloud.datasphere.v2.GetProjectRequest" as const, + + encode( + message: GetProjectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetProjectRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetProjectRequest } as GetProjectRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetProjectRequest { + const message = { ...baseGetProjectRequest } as GetProjectRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + return message; + }, + + toJSON(message: GetProjectRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetProjectRequest { + const message = { ...baseGetProjectRequest } as GetProjectRequest; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetProjectRequest.$type, GetProjectRequest); + +const baseListProjectsRequest: object = { + $type: "yandex.cloud.datasphere.v2.ListProjectsRequest", + communityId: "", + pageSize: 0, + pageToken: "", + projectNamePattern: "", + ownedById: "", +}; + +export const ListProjectsRequest = { + $type: "yandex.cloud.datasphere.v2.ListProjectsRequest" as const, + + encode( + message: ListProjectsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.communityId !== "") { + writer.uint32(10).string(message.communityId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.projectNamePattern !== "") { + writer.uint32(34).string(message.projectNamePattern); + } + if (message.ownedById !== "") { + writer.uint32(42).string(message.ownedById); + } + return writer; + }, + + decode(input: _m0.Reader | 
Uint8Array, length?: number): ListProjectsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListProjectsRequest } as ListProjectsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.communityId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.projectNamePattern = reader.string(); + break; + case 5: + message.ownedById = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListProjectsRequest { + const message = { ...baseListProjectsRequest } as ListProjectsRequest; + message.communityId = + object.communityId !== undefined && object.communityId !== null + ? String(object.communityId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.projectNamePattern = + object.projectNamePattern !== undefined && + object.projectNamePattern !== null + ? String(object.projectNamePattern) + : ""; + message.ownedById = + object.ownedById !== undefined && object.ownedById !== null + ? 
String(object.ownedById) + : ""; + return message; + }, + + toJSON(message: ListProjectsRequest): unknown { + const obj: any = {}; + message.communityId !== undefined && + (obj.communityId = message.communityId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.projectNamePattern !== undefined && + (obj.projectNamePattern = message.projectNamePattern); + message.ownedById !== undefined && (obj.ownedById = message.ownedById); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListProjectsRequest { + const message = { ...baseListProjectsRequest } as ListProjectsRequest; + message.communityId = object.communityId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.projectNamePattern = object.projectNamePattern ?? ""; + message.ownedById = object.ownedById ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListProjectsRequest.$type, ListProjectsRequest); + +const baseListProjectsResponse: object = { + $type: "yandex.cloud.datasphere.v2.ListProjectsResponse", + nextPageToken: "", +}; + +export const ListProjectsResponse = { + $type: "yandex.cloud.datasphere.v2.ListProjectsResponse" as const, + + encode( + message: ListProjectsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.projects) { + Project.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListProjectsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListProjectsResponse } as ListProjectsResponse; + message.projects = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projects.push(Project.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListProjectsResponse { + const message = { ...baseListProjectsResponse } as ListProjectsResponse; + message.projects = (object.projects ?? []).map((e: any) => + Project.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListProjectsResponse): unknown { + const obj: any = {}; + if (message.projects) { + obj.projects = message.projects.map((e) => + e ? Project.toJSON(e) : undefined + ); + } else { + obj.projects = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListProjectsResponse { + const message = { ...baseListProjectsResponse } as ListProjectsResponse; + message.projects = + object.projects?.map((e) => Project.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListProjectsResponse.$type, ListProjectsResponse); + +const baseGetUnitBalanceRequest: object = { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceRequest", + projectId: "", +}; + +export const GetUnitBalanceRequest = { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceRequest" as const, + + encode( + message: GetUnitBalanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetUnitBalanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetUnitBalanceRequest } as GetUnitBalanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetUnitBalanceRequest { + const message = { ...baseGetUnitBalanceRequest } as GetUnitBalanceRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + return message; + }, + + toJSON(message: GetUnitBalanceRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetUnitBalanceRequest { + const message = { ...baseGetUnitBalanceRequest } as GetUnitBalanceRequest; + message.projectId = object.projectId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetUnitBalanceRequest.$type, GetUnitBalanceRequest); + +const baseGetUnitBalanceResponse: object = { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceResponse", +}; + +export const GetUnitBalanceResponse = { + $type: "yandex.cloud.datasphere.v2.GetUnitBalanceResponse" as const, + + encode( + message: GetUnitBalanceResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.unitBalance !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.unitBalance! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetUnitBalanceResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetUnitBalanceResponse } as GetUnitBalanceResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.unitBalance = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetUnitBalanceResponse { + const message = { ...baseGetUnitBalanceResponse } as GetUnitBalanceResponse; + message.unitBalance = + object.unitBalance !== undefined && object.unitBalance !== null + ? Number(object.unitBalance) + : undefined; + return message; + }, + + toJSON(message: GetUnitBalanceResponse): unknown { + const obj: any = {}; + message.unitBalance !== undefined && + (obj.unitBalance = message.unitBalance); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetUnitBalanceResponse { + const message = { ...baseGetUnitBalanceResponse } as GetUnitBalanceResponse; + message.unitBalance = object.unitBalance ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(GetUnitBalanceResponse.$type, GetUnitBalanceResponse); + +const baseSetUnitBalanceRequest: object = { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceRequest", + projectId: "", +}; + +export const SetUnitBalanceRequest = { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceRequest" as const, + + encode( + message: SetUnitBalanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.unitBalance !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.unitBalance! }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetUnitBalanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSetUnitBalanceRequest } as SetUnitBalanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.unitBalance = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetUnitBalanceRequest { + const message = { ...baseSetUnitBalanceRequest } as SetUnitBalanceRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.unitBalance = + object.unitBalance !== undefined && object.unitBalance !== null + ? 
Number(object.unitBalance) + : undefined; + return message; + }, + + toJSON(message: SetUnitBalanceRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.unitBalance !== undefined && + (obj.unitBalance = message.unitBalance); + return obj; + }, + + fromPartial, I>>( + object: I + ): SetUnitBalanceRequest { + const message = { ...baseSetUnitBalanceRequest } as SetUnitBalanceRequest; + message.projectId = object.projectId ?? ""; + message.unitBalance = object.unitBalance ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(SetUnitBalanceRequest.$type, SetUnitBalanceRequest); + +const baseSetUnitBalanceMetadata: object = { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceMetadata", + projectId: "", +}; + +export const SetUnitBalanceMetadata = { + $type: "yandex.cloud.datasphere.v2.SetUnitBalanceMetadata" as const, + + encode( + message: SetUnitBalanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetUnitBalanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSetUnitBalanceMetadata } as SetUnitBalanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetUnitBalanceMetadata { + const message = { ...baseSetUnitBalanceMetadata } as SetUnitBalanceMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? 
String(object.projectId) + : ""; + return message; + }, + + toJSON(message: SetUnitBalanceMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): SetUnitBalanceMetadata { + const message = { ...baseSetUnitBalanceMetadata } as SetUnitBalanceMetadata; + message.projectId = object.projectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(SetUnitBalanceMetadata.$type, SetUnitBalanceMetadata); + +const baseProjectExecutionRequest: object = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionRequest", + projectId: "", + outputVariableNames: "", +}; + +export const ProjectExecutionRequest = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionRequest" as const, + + encode( + message: ProjectExecutionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.notebookId !== undefined) { + writer.uint32(18).string(message.notebookId); + } + if (message.cellId !== undefined) { + writer.uint32(26).string(message.cellId); + } + if (message.inputVariables !== undefined) { + Struct.encode( + Struct.wrap(message.inputVariables), + writer.uint32(34).fork() + ).ldelim(); + } + for (const v of message.outputVariableNames) { + writer.uint32(42).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ProjectExecutionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseProjectExecutionRequest, + } as ProjectExecutionRequest; + message.outputVariableNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.notebookId = reader.string(); + break; + case 3: + message.cellId = reader.string(); + break; + case 4: + message.inputVariables = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + case 5: + message.outputVariableNames.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ProjectExecutionRequest { + const message = { + ...baseProjectExecutionRequest, + } as ProjectExecutionRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.notebookId = + object.notebookId !== undefined && object.notebookId !== null + ? String(object.notebookId) + : undefined; + message.cellId = + object.cellId !== undefined && object.cellId !== null + ? String(object.cellId) + : undefined; + message.inputVariables = + typeof object.inputVariables === "object" + ? object.inputVariables + : undefined; + message.outputVariableNames = (object.outputVariableNames ?? 
[]).map( + (e: any) => String(e) + ); + return message; + }, + + toJSON(message: ProjectExecutionRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.notebookId !== undefined && (obj.notebookId = message.notebookId); + message.cellId !== undefined && (obj.cellId = message.cellId); + message.inputVariables !== undefined && + (obj.inputVariables = message.inputVariables); + if (message.outputVariableNames) { + obj.outputVariableNames = message.outputVariableNames.map((e) => e); + } else { + obj.outputVariableNames = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ProjectExecutionRequest { + const message = { + ...baseProjectExecutionRequest, + } as ProjectExecutionRequest; + message.projectId = object.projectId ?? ""; + message.notebookId = object.notebookId ?? undefined; + message.cellId = object.cellId ?? undefined; + message.inputVariables = object.inputVariables ?? undefined; + message.outputVariableNames = + object.outputVariableNames?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(ProjectExecutionRequest.$type, ProjectExecutionRequest); + +const baseProjectExecutionMetadata: object = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionMetadata", + projectId: "", +}; + +export const ProjectExecutionMetadata = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionMetadata" as const, + + encode( + message: ProjectExecutionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.notebookId !== undefined) { + writer.uint32(18).string(message.notebookId); + } + if (message.cellId !== undefined) { + writer.uint32(26).string(message.cellId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ProjectExecutionMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseProjectExecutionMetadata, + } as ProjectExecutionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.notebookId = reader.string(); + break; + case 3: + message.cellId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ProjectExecutionMetadata { + const message = { + ...baseProjectExecutionMetadata, + } as ProjectExecutionMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.notebookId = + object.notebookId !== undefined && object.notebookId !== null + ? String(object.notebookId) + : undefined; + message.cellId = + object.cellId !== undefined && object.cellId !== null + ? String(object.cellId) + : undefined; + return message; + }, + + toJSON(message: ProjectExecutionMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.notebookId !== undefined && (obj.notebookId = message.notebookId); + message.cellId !== undefined && (obj.cellId = message.cellId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ProjectExecutionMetadata { + const message = { + ...baseProjectExecutionMetadata, + } as ProjectExecutionMetadata; + message.projectId = object.projectId ?? ""; + message.notebookId = object.notebookId ?? undefined; + message.cellId = object.cellId ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ProjectExecutionMetadata.$type, + ProjectExecutionMetadata +); + +const baseProjectExecutionResponse: object = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionResponse", + checkpointId: "", + executionStatus: 0, +}; + +export const ProjectExecutionResponse = { + $type: "yandex.cloud.datasphere.v2.ProjectExecutionResponse" as const, + + encode( + message: ProjectExecutionResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.checkpointId !== "") { + writer.uint32(10).string(message.checkpointId); + } + if (message.outputVariables !== undefined) { + Struct.encode( + Struct.wrap(message.outputVariables), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.executionStatus !== 0) { + writer.uint32(24).int32(message.executionStatus); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ProjectExecutionResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseProjectExecutionResponse, + } as ProjectExecutionResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.checkpointId = reader.string(); + break; + case 2: + message.outputVariables = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + case 3: + message.executionStatus = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ProjectExecutionResponse { + const message = { + ...baseProjectExecutionResponse, + } as ProjectExecutionResponse; + message.checkpointId = + object.checkpointId !== undefined && object.checkpointId !== null + ? String(object.checkpointId) + : ""; + message.outputVariables = + typeof object.outputVariables === "object" + ? 
object.outputVariables + : undefined; + message.executionStatus = + object.executionStatus !== undefined && object.executionStatus !== null + ? executionStatusFromJSON(object.executionStatus) + : 0; + return message; + }, + + toJSON(message: ProjectExecutionResponse): unknown { + const obj: any = {}; + message.checkpointId !== undefined && + (obj.checkpointId = message.checkpointId); + message.outputVariables !== undefined && + (obj.outputVariables = message.outputVariables); + message.executionStatus !== undefined && + (obj.executionStatus = executionStatusToJSON(message.executionStatus)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ProjectExecutionResponse { + const message = { + ...baseProjectExecutionResponse, + } as ProjectExecutionResponse; + message.checkpointId = object.checkpointId ?? ""; + message.outputVariables = object.outputVariables ?? undefined; + message.executionStatus = object.executionStatus ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + ProjectExecutionResponse.$type, + ProjectExecutionResponse +); + +const baseCellOutputsRequest: object = { + $type: "yandex.cloud.datasphere.v2.CellOutputsRequest", + projectId: "", + cellId: "", + checkpointId: "", +}; + +export const CellOutputsRequest = { + $type: "yandex.cloud.datasphere.v2.CellOutputsRequest" as const, + + encode( + message: CellOutputsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.cellId !== "") { + writer.uint32(18).string(message.cellId); + } + if (message.checkpointId !== "") { + writer.uint32(26).string(message.checkpointId); + } + if (message.startAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startAt), + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CellOutputsRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCellOutputsRequest } as CellOutputsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.cellId = reader.string(); + break; + case 3: + message.checkpointId = reader.string(); + break; + case 4: + message.startAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CellOutputsRequest { + const message = { ...baseCellOutputsRequest } as CellOutputsRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.cellId = + object.cellId !== undefined && object.cellId !== null + ? String(object.cellId) + : ""; + message.checkpointId = + object.checkpointId !== undefined && object.checkpointId !== null + ? String(object.checkpointId) + : ""; + message.startAt = + object.startAt !== undefined && object.startAt !== null + ? fromJsonTimestamp(object.startAt) + : undefined; + return message; + }, + + toJSON(message: CellOutputsRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.cellId !== undefined && (obj.cellId = message.cellId); + message.checkpointId !== undefined && + (obj.checkpointId = message.checkpointId); + message.startAt !== undefined && + (obj.startAt = message.startAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): CellOutputsRequest { + const message = { ...baseCellOutputsRequest } as CellOutputsRequest; + message.projectId = object.projectId ?? ""; + message.cellId = object.cellId ?? ""; + message.checkpointId = object.checkpointId ?? ""; + message.startAt = object.startAt ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(CellOutputsRequest.$type, CellOutputsRequest); + +const baseCellOutputsResponse: object = { + $type: "yandex.cloud.datasphere.v2.CellOutputsResponse", + outputs: "", +}; + +export const CellOutputsResponse = { + $type: "yandex.cloud.datasphere.v2.CellOutputsResponse" as const, + + encode( + message: CellOutputsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.outputs) { + writer.uint32(10).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CellOutputsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCellOutputsResponse } as CellOutputsResponse; + message.outputs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.outputs.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CellOutputsResponse { + const message = { ...baseCellOutputsResponse } as CellOutputsResponse; + message.outputs = (object.outputs ?? 
[]).map((e: any) => String(e)); + return message; + }, + + toJSON(message: CellOutputsResponse): unknown { + const obj: any = {}; + if (message.outputs) { + obj.outputs = message.outputs.map((e) => e); + } else { + obj.outputs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): CellOutputsResponse { + const message = { ...baseCellOutputsResponse } as CellOutputsResponse; + message.outputs = object.outputs?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(CellOutputsResponse.$type, CellOutputsResponse); + +const baseGetStateVariablesRequest: object = { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesRequest", + projectId: "", + notebookId: "", + variableNames: "", + checkpointId: "", +}; + +export const GetStateVariablesRequest = { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesRequest" as const, + + encode( + message: GetStateVariablesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + if (message.notebookId !== "") { + writer.uint32(18).string(message.notebookId); + } + for (const v of message.variableNames) { + writer.uint32(26).string(v!); + } + if (message.checkpointId !== "") { + writer.uint32(34).string(message.checkpointId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetStateVariablesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetStateVariablesRequest, + } as GetStateVariablesRequest; + message.variableNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + case 2: + message.notebookId = reader.string(); + break; + case 3: + message.variableNames.push(reader.string()); + break; + case 4: + message.checkpointId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetStateVariablesRequest { + const message = { + ...baseGetStateVariablesRequest, + } as GetStateVariablesRequest; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.notebookId = + object.notebookId !== undefined && object.notebookId !== null + ? String(object.notebookId) + : ""; + message.variableNames = (object.variableNames ?? []).map((e: any) => + String(e) + ); + message.checkpointId = + object.checkpointId !== undefined && object.checkpointId !== null + ? String(object.checkpointId) + : ""; + return message; + }, + + toJSON(message: GetStateVariablesRequest): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + message.notebookId !== undefined && (obj.notebookId = message.notebookId); + if (message.variableNames) { + obj.variableNames = message.variableNames.map((e) => e); + } else { + obj.variableNames = []; + } + message.checkpointId !== undefined && + (obj.checkpointId = message.checkpointId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetStateVariablesRequest { + const message = { + ...baseGetStateVariablesRequest, + } as GetStateVariablesRequest; + message.projectId = object.projectId ?? ""; + message.notebookId = object.notebookId ?? 
""; + message.variableNames = object.variableNames?.map((e) => e) || []; + message.checkpointId = object.checkpointId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetStateVariablesRequest.$type, + GetStateVariablesRequest +); + +const baseGetStateVariablesResponse: object = { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesResponse", +}; + +export const GetStateVariablesResponse = { + $type: "yandex.cloud.datasphere.v2.GetStateVariablesResponse" as const, + + encode( + message: GetStateVariablesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.variables !== undefined) { + Struct.encode( + Struct.wrap(message.variables), + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetStateVariablesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseGetStateVariablesResponse, + } as GetStateVariablesResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variables = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetStateVariablesResponse { + const message = { + ...baseGetStateVariablesResponse, + } as GetStateVariablesResponse; + message.variables = + typeof object.variables === "object" ? object.variables : undefined; + return message; + }, + + toJSON(message: GetStateVariablesResponse): unknown { + const obj: any = {}; + message.variables !== undefined && (obj.variables = message.variables); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetStateVariablesResponse { + const message = { + ...baseGetStateVariablesResponse, + } as GetStateVariablesResponse; + message.variables = object.variables ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + GetStateVariablesResponse.$type, + GetStateVariablesResponse +); + +const baseSetProjectAccessBindingsMetadata: object = { + $type: "yandex.cloud.datasphere.v2.SetProjectAccessBindingsMetadata", + projectId: "", +}; + +export const SetProjectAccessBindingsMetadata = { + $type: "yandex.cloud.datasphere.v2.SetProjectAccessBindingsMetadata" as const, + + encode( + message: SetProjectAccessBindingsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetProjectAccessBindingsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSetProjectAccessBindingsMetadata, + } as SetProjectAccessBindingsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetProjectAccessBindingsMetadata { + const message = { + ...baseSetProjectAccessBindingsMetadata, + } as SetProjectAccessBindingsMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + return message; + }, + + toJSON(message: SetProjectAccessBindingsMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): SetProjectAccessBindingsMetadata { + const message = { + ...baseSetProjectAccessBindingsMetadata, + } as SetProjectAccessBindingsMetadata; + message.projectId = object.projectId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + SetProjectAccessBindingsMetadata.$type, + SetProjectAccessBindingsMetadata +); + +const baseUpdateProjectAccessBindingsMetadata: object = { + $type: "yandex.cloud.datasphere.v2.UpdateProjectAccessBindingsMetadata", + projectId: "", +}; + +export const UpdateProjectAccessBindingsMetadata = { + $type: + "yandex.cloud.datasphere.v2.UpdateProjectAccessBindingsMetadata" as const, + + encode( + message: UpdateProjectAccessBindingsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.projectId !== "") { + writer.uint32(10).string(message.projectId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateProjectAccessBindingsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateProjectAccessBindingsMetadata, + } as UpdateProjectAccessBindingsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.projectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateProjectAccessBindingsMetadata { + const message = { + ...baseUpdateProjectAccessBindingsMetadata, + } as UpdateProjectAccessBindingsMetadata; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + return message; + }, + + toJSON(message: UpdateProjectAccessBindingsMetadata): unknown { + const obj: any = {}; + message.projectId !== undefined && (obj.projectId = message.projectId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateProjectAccessBindingsMetadata { + const message = { + ...baseUpdateProjectAccessBindingsMetadata, + } as UpdateProjectAccessBindingsMetadata; + message.projectId = object.projectId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateProjectAccessBindingsMetadata.$type, + UpdateProjectAccessBindingsMetadata +); + +/** A set of methods for managing Project resources. */ +export const ProjectServiceService = { + /** Creates a project in the specified folder. */ + create: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateProjectRequest) => + Buffer.from(CreateProjectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateProjectRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified project. */ + update: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateProjectRequest) => + Buffer.from(UpdateProjectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateProjectRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified project. */ + delete: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteProjectRequest) => + Buffer.from(DeleteProjectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteProjectRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Opens the specified project. 
*/ + open: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Open", + requestStream: false, + responseStream: false, + requestSerialize: (value: OpenProjectRequest) => + Buffer.from(OpenProjectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => OpenProjectRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns the specified project. */ + get: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetProjectRequest) => + Buffer.from(GetProjectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetProjectRequest.decode(value), + responseSerialize: (value: Project) => + Buffer.from(Project.encode(value).finish()), + responseDeserialize: (value: Buffer) => Project.decode(value), + }, + /** Lists projects for the specified community. */ + list: { + path: "/yandex.cloud.datasphere.v2.ProjectService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListProjectsRequest) => + Buffer.from(ListProjectsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListProjectsRequest.decode(value), + responseSerialize: (value: ListProjectsResponse) => + Buffer.from(ListProjectsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListProjectsResponse.decode(value), + }, + /** Returns the unit balance of the specified project. 
*/ + getUnitBalance: { + path: "/yandex.cloud.datasphere.v2.ProjectService/GetUnitBalance", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetUnitBalanceRequest) => + Buffer.from(GetUnitBalanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetUnitBalanceRequest.decode(value), + responseSerialize: (value: GetUnitBalanceResponse) => + Buffer.from(GetUnitBalanceResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + GetUnitBalanceResponse.decode(value), + }, + /** Sets the unit balance of the specified project. */ + setUnitBalance: { + path: "/yandex.cloud.datasphere.v2.ProjectService/SetUnitBalance", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetUnitBalanceRequest) => + Buffer.from(SetUnitBalanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => SetUnitBalanceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Executes code in the specified cell or notebook. */ + execute: { + path: "/yandex.cloud.datasphere.v2.ProjectService/Execute", + requestStream: false, + responseStream: false, + requestSerialize: (value: ProjectExecutionRequest) => + Buffer.from(ProjectExecutionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ProjectExecutionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns outputs of the specified cell. 
*/ + getCellOutputs: { + path: "/yandex.cloud.datasphere.v2.ProjectService/GetCellOutputs", + requestStream: false, + responseStream: false, + requestSerialize: (value: CellOutputsRequest) => + Buffer.from(CellOutputsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CellOutputsRequest.decode(value), + responseSerialize: (value: CellOutputsResponse) => + Buffer.from(CellOutputsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => CellOutputsResponse.decode(value), + }, + /** Returns state variables of the specified notebook. */ + getStateVariables: { + path: "/yandex.cloud.datasphere.v2.ProjectService/GetStateVariables", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetStateVariablesRequest) => + Buffer.from(GetStateVariablesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetStateVariablesRequest.decode(value), + responseSerialize: (value: GetStateVariablesResponse) => + Buffer.from(GetStateVariablesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + GetStateVariablesResponse.decode(value), + }, + /** Lists access bindings for the project. */ + listAccessBindings: { + path: "/yandex.cloud.datasphere.v2.ProjectService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for the project. 
*/ + setAccessBindings: { + path: "/yandex.cloud.datasphere.v2.ProjectService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for the project. */ + updateAccessBindings: { + path: "/yandex.cloud.datasphere.v2.ProjectService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ProjectServiceServer extends UntypedServiceImplementation { + /** Creates a project in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified project. */ + update: handleUnaryCall; + /** Deletes the specified project. */ + delete: handleUnaryCall; + /** Opens the specified project. */ + open: handleUnaryCall; + /** Returns the specified project. */ + get: handleUnaryCall; + /** Lists projects for the specified community. */ + list: handleUnaryCall; + /** Returns the unit balance of the specified project. */ + getUnitBalance: handleUnaryCall< + GetUnitBalanceRequest, + GetUnitBalanceResponse + >; + /** Sets the unit balance of the specified project. */ + setUnitBalance: handleUnaryCall; + /** Executes code in the specified cell or notebook. */ + execute: handleUnaryCall; + /** Returns outputs of the specified cell. 
*/ + getCellOutputs: handleUnaryCall; + /** Returns state variables of the specified notebook. */ + getStateVariables: handleUnaryCall< + GetStateVariablesRequest, + GetStateVariablesResponse + >; + /** Lists access bindings for the project. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for the project. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for the project. */ + updateAccessBindings: handleUnaryCall; +} + +export interface ProjectServiceClient extends Client { + /** Creates a project in the specified folder. */ + create( + request: CreateProjectRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateProjectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateProjectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified project. */ + update( + request: UpdateProjectRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateProjectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateProjectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified project. 
*/ + delete( + request: DeleteProjectRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteProjectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteProjectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Opens the specified project. */ + open( + request: OpenProjectRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + open( + request: OpenProjectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + open( + request: OpenProjectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns the specified project. */ + get( + request: GetProjectRequest, + callback: (error: ServiceError | null, response: Project) => void + ): ClientUnaryCall; + get( + request: GetProjectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Project) => void + ): ClientUnaryCall; + get( + request: GetProjectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Project) => void + ): ClientUnaryCall; + /** Lists projects for the specified community. 
*/ + list( + request: ListProjectsRequest, + callback: ( + error: ServiceError | null, + response: ListProjectsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListProjectsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListProjectsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListProjectsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListProjectsResponse + ) => void + ): ClientUnaryCall; + /** Returns the unit balance of the specified project. */ + getUnitBalance( + request: GetUnitBalanceRequest, + callback: ( + error: ServiceError | null, + response: GetUnitBalanceResponse + ) => void + ): ClientUnaryCall; + getUnitBalance( + request: GetUnitBalanceRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: GetUnitBalanceResponse + ) => void + ): ClientUnaryCall; + getUnitBalance( + request: GetUnitBalanceRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: GetUnitBalanceResponse + ) => void + ): ClientUnaryCall; + /** Sets the unit balance of the specified project. */ + setUnitBalance( + request: SetUnitBalanceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setUnitBalance( + request: SetUnitBalanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setUnitBalance( + request: SetUnitBalanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Executes code in the specified cell or notebook. 
*/ + execute( + request: ProjectExecutionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + execute( + request: ProjectExecutionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + execute( + request: ProjectExecutionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns outputs of the specified cell. */ + getCellOutputs( + request: CellOutputsRequest, + callback: ( + error: ServiceError | null, + response: CellOutputsResponse + ) => void + ): ClientUnaryCall; + getCellOutputs( + request: CellOutputsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: CellOutputsResponse + ) => void + ): ClientUnaryCall; + getCellOutputs( + request: CellOutputsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: CellOutputsResponse + ) => void + ): ClientUnaryCall; + /** Returns state variables of the specified notebook. */ + getStateVariables( + request: GetStateVariablesRequest, + callback: ( + error: ServiceError | null, + response: GetStateVariablesResponse + ) => void + ): ClientUnaryCall; + getStateVariables( + request: GetStateVariablesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: GetStateVariablesResponse + ) => void + ): ClientUnaryCall; + getStateVariables( + request: GetStateVariablesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: GetStateVariablesResponse + ) => void + ): ClientUnaryCall; + /** Lists access bindings for the project. 
*/ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the project. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the project. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ProjectServiceClient = makeGenericClientConstructor( + ProjectServiceService, + "yandex.cloud.datasphere.v2.ProjectService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ProjectServiceClient; + service: typeof ProjectServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/secret.ts b/src/generated/yandex/cloud/datasphere/v2/secret.ts new file mode 100644 index 00000000..9e94ccdd --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/secret.ts @@ -0,0 +1,430 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export interface Secret { + $type: "yandex.cloud.datasphere.v2.Secret"; + /** ID of the secret. */ + id: string; + /** ID of the project. */ + projectId: string; + /** Time when secret was created. */ + createdAt?: Date; + /** Name of the secret. 1-63 characters long. */ + name: string; + /** Description of the secret. 0-256 characters long. */ + description: string; + /** Labels of the secret. */ + labels: { [key: string]: string }; + /** ID of the user who created secret. 
*/ + createdById: string; + /** Time of last secret update. */ + updatedAt?: Date; +} + +export interface Secret_LabelsEntry { + $type: "yandex.cloud.datasphere.v2.Secret.LabelsEntry"; + key: string; + value: string; +} + +export interface DecryptedSecret { + $type: "yandex.cloud.datasphere.v2.DecryptedSecret"; + secret?: Secret; + /** Content of the secret. */ + content: string; +} + +const baseSecret: object = { + $type: "yandex.cloud.datasphere.v2.Secret", + id: "", + projectId: "", + name: "", + description: "", + createdById: "", +}; + +export const Secret = { + $type: "yandex.cloud.datasphere.v2.Secret" as const, + + encode( + message: Secret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.projectId !== "") { + writer.uint32(18).string(message.projectId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Secret_LabelsEntry.encode( + { + $type: "yandex.cloud.datasphere.v2.Secret.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.createdById !== "") { + writer.uint32(58).string(message.createdById); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(74).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSecret } as Secret; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.projectId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = Secret_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.createdById = reader.string(); + break; + case 9: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret { + const message = { ...baseSecret } as Secret; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.projectId = + object.projectId !== undefined && object.projectId !== null + ? String(object.projectId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.createdById = + object.createdById !== undefined && object.createdById !== null + ? String(object.createdById) + : ""; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? 
fromJsonTimestamp(object.updatedAt) + : undefined; + return message; + }, + + toJSON(message: Secret): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.projectId !== undefined && (obj.projectId = message.projectId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.createdById !== undefined && + (obj.createdById = message.createdById); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + return obj; + }, + + fromPartial, I>>(object: I): Secret { + const message = { ...baseSecret } as Secret; + message.id = object.id ?? ""; + message.projectId = object.projectId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.createdById = object.createdById ?? ""; + message.updatedAt = object.updatedAt ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(Secret.$type, Secret); + +const baseSecret_LabelsEntry: object = { + $type: "yandex.cloud.datasphere.v2.Secret.LabelsEntry", + key: "", + value: "", +}; + +export const Secret_LabelsEntry = { + $type: "yandex.cloud.datasphere.v2.Secret.LabelsEntry" as const, + + encode( + message: Secret_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSecret_LabelsEntry } as Secret_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret_LabelsEntry { + const message = { ...baseSecret_LabelsEntry } as Secret_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Secret_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Secret_LabelsEntry { + const message = { ...baseSecret_LabelsEntry } as Secret_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Secret_LabelsEntry.$type, Secret_LabelsEntry); + +const baseDecryptedSecret: object = { + $type: "yandex.cloud.datasphere.v2.DecryptedSecret", + content: "", +}; + +export const DecryptedSecret = { + $type: "yandex.cloud.datasphere.v2.DecryptedSecret" as const, + + encode( + message: DecryptedSecret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.secret !== undefined) { + Secret.encode(message.secret, writer.uint32(10).fork()).ldelim(); + } + if (message.content !== "") { + writer.uint32(18).string(message.content); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DecryptedSecret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDecryptedSecret } as DecryptedSecret; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.secret = Secret.decode(reader, reader.uint32()); + break; + case 2: + message.content = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DecryptedSecret { + const message = { ...baseDecryptedSecret } as DecryptedSecret; + message.secret = + object.secret !== undefined && object.secret !== null + ? Secret.fromJSON(object.secret) + : undefined; + message.content = + object.content !== undefined && object.content !== null + ? String(object.content) + : ""; + return message; + }, + + toJSON(message: DecryptedSecret): unknown { + const obj: any = {}; + message.secret !== undefined && + (obj.secret = message.secret ? 
Secret.toJSON(message.secret) : undefined); + message.content !== undefined && (obj.content = message.content); + return obj; + }, + + fromPartial, I>>( + object: I + ): DecryptedSecret { + const message = { ...baseDecryptedSecret } as DecryptedSecret; + message.secret = + object.secret !== undefined && object.secret !== null + ? Secret.fromPartial(object.secret) + : undefined; + message.content = object.content ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DecryptedSecret.$type, DecryptedSecret); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datasphere/v2/user.ts b/src/generated/yandex/cloud/datasphere/v2/user.ts new file mode 100644 index 00000000..5d6e8581 --- /dev/null +++ b/src/generated/yandex/cloud/datasphere/v2/user.ts @@ -0,0 +1,160 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long 
from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.datasphere.v2"; + +export interface User { + $type: "yandex.cloud.datasphere.v2.User"; + /** ID of the user. */ + id: string; + /** Name of the user. */ + name: string; + /** Email of the user. */ + email: string; + /** URL to the user's profile picture. */ + picture: string; + /** An image content of the user's profile picture. */ + pictureData: string; +} + +const baseUser: object = { + $type: "yandex.cloud.datasphere.v2.User", + id: "", + name: "", + email: "", + picture: "", + pictureData: "", +}; + +export const User = { + $type: "yandex.cloud.datasphere.v2.User" as const, + + encode(message: User, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.email !== "") { + writer.uint32(26).string(message.email); + } + if (message.picture !== "") { + writer.uint32(34).string(message.picture); + } + if (message.pictureData !== "") { + writer.uint32(42).string(message.pictureData); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): User { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUser } as User; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.email = reader.string(); + break; + case 4: + message.picture = reader.string(); + break; + case 5: + message.pictureData = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): User { + const message = { ...baseUser } as User; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.email = + object.email !== undefined && object.email !== null + ? String(object.email) + : ""; + message.picture = + object.picture !== undefined && object.picture !== null + ? String(object.picture) + : ""; + message.pictureData = + object.pictureData !== undefined && object.pictureData !== null + ? String(object.pictureData) + : ""; + return message; + }, + + toJSON(message: User): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.name !== undefined && (obj.name = message.name); + message.email !== undefined && (obj.email = message.email); + message.picture !== undefined && (obj.picture = message.picture); + message.pictureData !== undefined && + (obj.pictureData = message.pictureData); + return obj; + }, + + fromPartial, I>>(object: I): User { + const message = { ...baseUser } as User; + message.id = object.id ?? ""; + message.name = object.name ?? ""; + message.email = object.email ?? ""; + message.picture = object.picture ?? ""; + message.pictureData = object.pictureData ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(User.$type, User); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/index.ts b/src/generated/yandex/cloud/datatransfer/index.ts index 84b4cf4d..21d097cf 100644 --- a/src/generated/yandex/cloud/datatransfer/index.ts +++ b/src/generated/yandex/cloud/datatransfer/index.ts @@ -4,6 +4,10 @@ export * as transfer from './v1/transfer' export * as transfer_service from './v1/transfer_service' export * as clickhouse from './v1/endpoint/clickhouse' export * as common from './v1/endpoint/common' +export * as kafka from './v1/endpoint/kafka' export * as mongo from './v1/endpoint/mongo' export * as mysql from './v1/endpoint/mysql' -export * as postgres from './v1/endpoint/postgres' \ No newline at end of file +export * as parsers from './v1/endpoint/parsers' +export * as postgres from './v1/endpoint/postgres' +export * as serializers from './v1/endpoint/serializers' +export * as ydb from './v1/endpoint/ydb' \ No newline at end of file diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts index 55aa40a5..080240a9 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts @@ -10,6 +10,14 @@ import { PostgresSource, PostgresTarget, } from "../../../../yandex/cloud/datatransfer/v1/endpoint/postgres"; +import { + YdbSource, + 
YdbTarget, +} from "../../../../yandex/cloud/datatransfer/v1/endpoint/ydb"; +import { + KafkaSource, + KafkaTarget, +} from "../../../../yandex/cloud/datatransfer/v1/endpoint/kafka"; import { MongoSource, MongoTarget, @@ -41,11 +49,15 @@ export interface EndpointSettings { $type: "yandex.cloud.datatransfer.v1.EndpointSettings"; mysqlSource?: MysqlSource | undefined; postgresSource?: PostgresSource | undefined; + ydbSource?: YdbSource | undefined; + kafkaSource?: KafkaSource | undefined; mongoSource?: MongoSource | undefined; clickhouseSource?: ClickhouseSource | undefined; mysqlTarget?: MysqlTarget | undefined; postgresTarget?: PostgresTarget | undefined; clickhouseTarget?: ClickhouseTarget | undefined; + ydbTarget?: YdbTarget | undefined; + kafkaTarget?: KafkaTarget | undefined; mongoTarget?: MongoTarget | undefined; } @@ -304,6 +316,15 @@ export const EndpointSettings = { writer.uint32(18).fork() ).ldelim(); } + if (message.ydbSource !== undefined) { + YdbSource.encode(message.ydbSource, writer.uint32(26).fork()).ldelim(); + } + if (message.kafkaSource !== undefined) { + KafkaSource.encode( + message.kafkaSource, + writer.uint32(66).fork() + ).ldelim(); + } if (message.mongoSource !== undefined) { MongoSource.encode( message.mongoSource, @@ -334,6 +355,15 @@ export const EndpointSettings = { writer.uint32(834).fork() ).ldelim(); } + if (message.ydbTarget !== undefined) { + YdbTarget.encode(message.ydbTarget, writer.uint32(842).fork()).ldelim(); + } + if (message.kafkaTarget !== undefined) { + KafkaTarget.encode( + message.kafkaTarget, + writer.uint32(882).fork() + ).ldelim(); + } if (message.mongoTarget !== undefined) { MongoTarget.encode( message.mongoTarget, @@ -359,6 +389,12 @@ export const EndpointSettings = { reader.uint32() ); break; + case 3: + message.ydbSource = YdbSource.decode(reader, reader.uint32()); + break; + case 8: + message.kafkaSource = KafkaSource.decode(reader, reader.uint32()); + break; case 9: message.mongoSource = 
MongoSource.decode(reader, reader.uint32()); break; @@ -383,6 +419,12 @@ export const EndpointSettings = { reader.uint32() ); break; + case 105: + message.ydbTarget = YdbTarget.decode(reader, reader.uint32()); + break; + case 110: + message.kafkaTarget = KafkaTarget.decode(reader, reader.uint32()); + break; case 111: message.mongoTarget = MongoTarget.decode(reader, reader.uint32()); break; @@ -404,6 +446,14 @@ export const EndpointSettings = { object.postgresSource !== undefined && object.postgresSource !== null ? PostgresSource.fromJSON(object.postgresSource) : undefined; + message.ydbSource = + object.ydbSource !== undefined && object.ydbSource !== null + ? YdbSource.fromJSON(object.ydbSource) + : undefined; + message.kafkaSource = + object.kafkaSource !== undefined && object.kafkaSource !== null + ? KafkaSource.fromJSON(object.kafkaSource) + : undefined; message.mongoSource = object.mongoSource !== undefined && object.mongoSource !== null ? MongoSource.fromJSON(object.mongoSource) @@ -424,6 +474,14 @@ export const EndpointSettings = { object.clickhouseTarget !== undefined && object.clickhouseTarget !== null ? ClickhouseTarget.fromJSON(object.clickhouseTarget) : undefined; + message.ydbTarget = + object.ydbTarget !== undefined && object.ydbTarget !== null + ? YdbTarget.fromJSON(object.ydbTarget) + : undefined; + message.kafkaTarget = + object.kafkaTarget !== undefined && object.kafkaTarget !== null + ? KafkaTarget.fromJSON(object.kafkaTarget) + : undefined; message.mongoTarget = object.mongoTarget !== undefined && object.mongoTarget !== null ? MongoTarget.fromJSON(object.mongoTarget) @@ -441,6 +499,14 @@ export const EndpointSettings = { (obj.postgresSource = message.postgresSource ? PostgresSource.toJSON(message.postgresSource) : undefined); + message.ydbSource !== undefined && + (obj.ydbSource = message.ydbSource + ? YdbSource.toJSON(message.ydbSource) + : undefined); + message.kafkaSource !== undefined && + (obj.kafkaSource = message.kafkaSource + ? 
KafkaSource.toJSON(message.kafkaSource) + : undefined); message.mongoSource !== undefined && (obj.mongoSource = message.mongoSource ? MongoSource.toJSON(message.mongoSource) @@ -461,6 +527,14 @@ export const EndpointSettings = { (obj.clickhouseTarget = message.clickhouseTarget ? ClickhouseTarget.toJSON(message.clickhouseTarget) : undefined); + message.ydbTarget !== undefined && + (obj.ydbTarget = message.ydbTarget + ? YdbTarget.toJSON(message.ydbTarget) + : undefined); + message.kafkaTarget !== undefined && + (obj.kafkaTarget = message.kafkaTarget + ? KafkaTarget.toJSON(message.kafkaTarget) + : undefined); message.mongoTarget !== undefined && (obj.mongoTarget = message.mongoTarget ? MongoTarget.toJSON(message.mongoTarget) @@ -480,6 +554,14 @@ export const EndpointSettings = { object.postgresSource !== undefined && object.postgresSource !== null ? PostgresSource.fromPartial(object.postgresSource) : undefined; + message.ydbSource = + object.ydbSource !== undefined && object.ydbSource !== null + ? YdbSource.fromPartial(object.ydbSource) + : undefined; + message.kafkaSource = + object.kafkaSource !== undefined && object.kafkaSource !== null + ? KafkaSource.fromPartial(object.kafkaSource) + : undefined; message.mongoSource = object.mongoSource !== undefined && object.mongoSource !== null ? MongoSource.fromPartial(object.mongoSource) @@ -500,6 +582,14 @@ export const EndpointSettings = { object.clickhouseTarget !== undefined && object.clickhouseTarget !== null ? ClickhouseTarget.fromPartial(object.clickhouseTarget) : undefined; + message.ydbTarget = + object.ydbTarget !== undefined && object.ydbTarget !== null + ? YdbTarget.fromPartial(object.ydbTarget) + : undefined; + message.kafkaTarget = + object.kafkaTarget !== undefined && object.kafkaTarget !== null + ? KafkaTarget.fromPartial(object.kafkaTarget) + : undefined; message.mongoTarget = object.mongoTarget !== undefined && object.mongoTarget !== null ? 
MongoTarget.fromPartial(object.mongoTarget) diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts index 8b589474..0701b50c 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts @@ -16,6 +16,7 @@ export enum ClickhouseCleanupPolicy { CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED = 0, CLICKHOUSE_CLEANUP_POLICY_DISABLED = 1, CLICKHOUSE_CLEANUP_POLICY_DROP = 2, + CLICKHOUSE_CLEANUP_POLICY_TRUNCATE = 3, UNRECOGNIZED = -1, } @@ -32,6 +33,9 @@ export function clickhouseCleanupPolicyFromJSON( case 2: case "CLICKHOUSE_CLEANUP_POLICY_DROP": return ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DROP; + case 3: + case "CLICKHOUSE_CLEANUP_POLICY_TRUNCATE": + return ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_TRUNCATE; case -1: case "UNRECOGNIZED": default: @@ -49,6 +53,8 @@ export function clickhouseCleanupPolicyToJSON( return "CLICKHOUSE_CLEANUP_POLICY_DISABLED"; case ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DROP: return "CLICKHOUSE_CLEANUP_POLICY_DROP"; + case ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_TRUNCATE: + return "CLICKHOUSE_CLEANUP_POLICY_TRUNCATE"; default: return "UNKNOWN"; } @@ -72,6 +78,7 @@ export interface ClickhouseConnectionOptions { $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnectionOptions"; mdbClusterId: string | undefined; onPremise?: OnPremiseClickhouse | undefined; + /** Database */ database: string; user: string; password?: Secret; @@ -111,7 +118,15 @@ export interface ClickhouseSource { connection?: ClickhouseConnection; subnetId: string; securityGroups: string[]; + /** + * While list of tables for replication. If none or empty list is presented - will + * replicate all tables. Can contain * patterns. + */ includeTables: string[]; + /** + * Exclude list of tables for replication. 
If none or empty list is presented - + * will replicate all tables. Can contain * patterns. + */ excludeTables: string[]; } @@ -121,6 +136,7 @@ export interface ClickhouseTarget { subnetId: string; securityGroups: string[]; clickhouseClusterName: string; + /** Alternative table names in target */ altNames: AltName[]; sharding?: ClickhouseSharding; cleanupPolicy: ClickhouseCleanupPolicy; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts index a926973e..079cdcfb 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts @@ -8,8 +8,11 @@ export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; export enum ObjectTransferStage { OBJECT_TRANSFER_STAGE_UNSPECIFIED = 0, + /** BEFORE_DATA - Before data transfer */ BEFORE_DATA = 1, + /** AFTER_DATA - After data transfer */ AFTER_DATA = 2, + /** NEVER - Don't copy */ NEVER = 3, UNRECOGNIZED = -1, } @@ -52,8 +55,11 @@ export function objectTransferStageToJSON(object: ObjectTransferStage): string { export enum CleanupPolicy { CLEANUP_POLICY_UNSPECIFIED = 0, + /** DISABLED - Don't cleanup */ DISABLED = 1, + /** DROP - Drop */ DROP = 2, + /** TRUNCATE - Truncate */ TRUNCATE = 3, UNRECOGNIZED = -1, } @@ -94,20 +100,139 @@ export function cleanupPolicyToJSON(object: CleanupPolicy): string { } } +export enum ColumnType { + COLUMN_TYPE_UNSPECIFIED = 0, + INT64 = 14, + INT32 = 1, + INT16 = 2, + INT8 = 3, + UINT64 = 4, + UINT32 = 5, + UINT16 = 6, + UINT8 = 7, + DOUBLE = 8, + BOOLEAN = 9, + STRING = 10, + UTF8 = 11, + ANY = 12, + DATETIME = 13, + UNRECOGNIZED = -1, +} + +export function columnTypeFromJSON(object: any): ColumnType { + switch (object) { + case 0: + case "COLUMN_TYPE_UNSPECIFIED": + return ColumnType.COLUMN_TYPE_UNSPECIFIED; + case 14: + case "INT64": + return ColumnType.INT64; + case 1: + case "INT32": + return ColumnType.INT32; + case 
2: + case "INT16": + return ColumnType.INT16; + case 3: + case "INT8": + return ColumnType.INT8; + case 4: + case "UINT64": + return ColumnType.UINT64; + case 5: + case "UINT32": + return ColumnType.UINT32; + case 6: + case "UINT16": + return ColumnType.UINT16; + case 7: + case "UINT8": + return ColumnType.UINT8; + case 8: + case "DOUBLE": + return ColumnType.DOUBLE; + case 9: + case "BOOLEAN": + return ColumnType.BOOLEAN; + case 10: + case "STRING": + return ColumnType.STRING; + case 11: + case "UTF8": + return ColumnType.UTF8; + case 12: + case "ANY": + return ColumnType.ANY; + case 13: + case "DATETIME": + return ColumnType.DATETIME; + case -1: + case "UNRECOGNIZED": + default: + return ColumnType.UNRECOGNIZED; + } +} + +export function columnTypeToJSON(object: ColumnType): string { + switch (object) { + case ColumnType.COLUMN_TYPE_UNSPECIFIED: + return "COLUMN_TYPE_UNSPECIFIED"; + case ColumnType.INT64: + return "INT64"; + case ColumnType.INT32: + return "INT32"; + case ColumnType.INT16: + return "INT16"; + case ColumnType.INT8: + return "INT8"; + case ColumnType.UINT64: + return "UINT64"; + case ColumnType.UINT32: + return "UINT32"; + case ColumnType.UINT16: + return "UINT16"; + case ColumnType.UINT8: + return "UINT8"; + case ColumnType.DOUBLE: + return "DOUBLE"; + case ColumnType.BOOLEAN: + return "BOOLEAN"; + case ColumnType.STRING: + return "STRING"; + case ColumnType.UTF8: + return "UTF8"; + case ColumnType.ANY: + return "ANY"; + case ColumnType.DATETIME: + return "DATETIME"; + default: + return "UNKNOWN"; + } +} + export interface AltName { $type: "yandex.cloud.datatransfer.v1.endpoint.AltName"; - /** From table name */ + /** Source table name */ fromName: string; - /** To table name */ + /** Target table name */ toName: string; } export interface Secret { $type: "yandex.cloud.datatransfer.v1.endpoint.Secret"; - /** Password */ + /** Raw secret value */ raw: string | undefined; } +export interface ColSchema { + $type: 
"yandex.cloud.datatransfer.v1.endpoint.ColSchema"; + name: string; + type: ColumnType; + key: boolean; + required: boolean; + path: string; +} + export interface TLSMode { $type: "yandex.cloud.datatransfer.v1.endpoint.TLSMode"; disabled?: Empty | undefined; @@ -131,6 +256,39 @@ export interface ColumnValue { stringValue: string | undefined; } +export interface DataTransformationOptions { + $type: "yandex.cloud.datatransfer.v1.endpoint.DataTransformationOptions"; + /** Cloud function */ + cloudFunction: string; + /** Service account */ + serviceAccountId: string; + /** Number of retries */ + numberOfRetries: number; + /** Buffer size for function */ + bufferSize: string; + /** Flush interval */ + bufferFlushInterval: string; + /** Invocation timeout */ + invocationTimeout: string; +} + +export interface FieldList { + $type: "yandex.cloud.datatransfer.v1.endpoint.FieldList"; + /** Column schema */ + fields: ColSchema[]; +} + +export interface DataSchema { + $type: "yandex.cloud.datatransfer.v1.endpoint.DataSchema"; + fields?: FieldList | undefined; + jsonFields: string | undefined; +} + +/** No authentication */ +export interface NoAuth { + $type: "yandex.cloud.datatransfer.v1.endpoint.NoAuth"; +} + const baseAltName: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.AltName", fromName: "", @@ -263,6 +421,120 @@ export const Secret = { messageTypeRegistry.set(Secret.$type, Secret); +const baseColSchema: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ColSchema", + name: "", + type: 0, + key: false, + required: false, + path: "", +}; + +export const ColSchema = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ColSchema" as const, + + encode( + message: ColSchema, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.type !== 0) { + writer.uint32(16).int32(message.type); + } + if (message.key === true) { + writer.uint32(24).bool(message.key); + } + if 
(message.required === true) { + writer.uint32(32).bool(message.required); + } + if (message.path !== "") { + writer.uint32(42).string(message.path); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ColSchema { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseColSchema } as ColSchema; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32() as any; + break; + case 3: + message.key = reader.bool(); + break; + case 4: + message.required = reader.bool(); + break; + case 5: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ColSchema { + const message = { ...baseColSchema } as ColSchema; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.type = + object.type !== undefined && object.type !== null + ? columnTypeFromJSON(object.type) + : 0; + message.key = + object.key !== undefined && object.key !== null + ? Boolean(object.key) + : false; + message.required = + object.required !== undefined && object.required !== null + ? Boolean(object.required) + : false; + message.path = + object.path !== undefined && object.path !== null + ? 
String(object.path) + : ""; + return message; + }, + + toJSON(message: ColSchema): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.type !== undefined && (obj.type = columnTypeToJSON(message.type)); + message.key !== undefined && (obj.key = message.key); + message.required !== undefined && (obj.required = message.required); + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial, I>>( + object: I + ): ColSchema { + const message = { ...baseColSchema } as ColSchema; + message.name = object.name ?? ""; + message.type = object.type ?? 0; + message.key = object.key ?? false; + message.required = object.required ?? false; + message.path = object.path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ColSchema.$type, ColSchema); + const baseTLSMode: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.TLSMode", }; @@ -471,6 +743,354 @@ export const ColumnValue = { messageTypeRegistry.set(ColumnValue.$type, ColumnValue); +const baseDataTransformationOptions: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.DataTransformationOptions", + cloudFunction: "", + serviceAccountId: "", + numberOfRetries: 0, + bufferSize: "", + bufferFlushInterval: "", + invocationTimeout: "", +}; + +export const DataTransformationOptions = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.DataTransformationOptions" as const, + + encode( + message: DataTransformationOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cloudFunction !== "") { + writer.uint32(10).string(message.cloudFunction); + } + if (message.serviceAccountId !== "") { + writer.uint32(66).string(message.serviceAccountId); + } + if (message.numberOfRetries !== 0) { + writer.uint32(16).int64(message.numberOfRetries); + } + if (message.bufferSize !== "") { + writer.uint32(26).string(message.bufferSize); + } + if (message.bufferFlushInterval !== "") { + 
writer.uint32(34).string(message.bufferFlushInterval); + } + if (message.invocationTimeout !== "") { + writer.uint32(42).string(message.invocationTimeout); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DataTransformationOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDataTransformationOptions, + } as DataTransformationOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cloudFunction = reader.string(); + break; + case 8: + message.serviceAccountId = reader.string(); + break; + case 2: + message.numberOfRetries = longToNumber(reader.int64() as Long); + break; + case 3: + message.bufferSize = reader.string(); + break; + case 4: + message.bufferFlushInterval = reader.string(); + break; + case 5: + message.invocationTimeout = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DataTransformationOptions { + const message = { + ...baseDataTransformationOptions, + } as DataTransformationOptions; + message.cloudFunction = + object.cloudFunction !== undefined && object.cloudFunction !== null + ? String(object.cloudFunction) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.numberOfRetries = + object.numberOfRetries !== undefined && object.numberOfRetries !== null + ? Number(object.numberOfRetries) + : 0; + message.bufferSize = + object.bufferSize !== undefined && object.bufferSize !== null + ? String(object.bufferSize) + : ""; + message.bufferFlushInterval = + object.bufferFlushInterval !== undefined && + object.bufferFlushInterval !== null + ? 
String(object.bufferFlushInterval) + : ""; + message.invocationTimeout = + object.invocationTimeout !== undefined && + object.invocationTimeout !== null + ? String(object.invocationTimeout) + : ""; + return message; + }, + + toJSON(message: DataTransformationOptions): unknown { + const obj: any = {}; + message.cloudFunction !== undefined && + (obj.cloudFunction = message.cloudFunction); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.numberOfRetries !== undefined && + (obj.numberOfRetries = Math.round(message.numberOfRetries)); + message.bufferSize !== undefined && (obj.bufferSize = message.bufferSize); + message.bufferFlushInterval !== undefined && + (obj.bufferFlushInterval = message.bufferFlushInterval); + message.invocationTimeout !== undefined && + (obj.invocationTimeout = message.invocationTimeout); + return obj; + }, + + fromPartial, I>>( + object: I + ): DataTransformationOptions { + const message = { + ...baseDataTransformationOptions, + } as DataTransformationOptions; + message.cloudFunction = object.cloudFunction ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.numberOfRetries = object.numberOfRetries ?? 0; + message.bufferSize = object.bufferSize ?? ""; + message.bufferFlushInterval = object.bufferFlushInterval ?? ""; + message.invocationTimeout = object.invocationTimeout ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DataTransformationOptions.$type, + DataTransformationOptions +); + +const baseFieldList: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.FieldList", +}; + +export const FieldList = { + $type: "yandex.cloud.datatransfer.v1.endpoint.FieldList" as const, + + encode( + message: FieldList, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.fields) { + ColSchema.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FieldList { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFieldList } as FieldList; + message.fields = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.fields.push(ColSchema.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FieldList { + const message = { ...baseFieldList } as FieldList; + message.fields = (object.fields ?? []).map((e: any) => + ColSchema.fromJSON(e) + ); + return message; + }, + + toJSON(message: FieldList): unknown { + const obj: any = {}; + if (message.fields) { + obj.fields = message.fields.map((e) => + e ? 
ColSchema.toJSON(e) : undefined + ); + } else { + obj.fields = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): FieldList { + const message = { ...baseFieldList } as FieldList; + message.fields = object.fields?.map((e) => ColSchema.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(FieldList.$type, FieldList); + +const baseDataSchema: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.DataSchema", +}; + +export const DataSchema = { + $type: "yandex.cloud.datatransfer.v1.endpoint.DataSchema" as const, + + encode( + message: DataSchema, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.fields !== undefined) { + FieldList.encode(message.fields, writer.uint32(18).fork()).ldelim(); + } + if (message.jsonFields !== undefined) { + writer.uint32(10).string(message.jsonFields); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DataSchema { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDataSchema } as DataSchema; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.fields = FieldList.decode(reader, reader.uint32()); + break; + case 1: + message.jsonFields = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DataSchema { + const message = { ...baseDataSchema } as DataSchema; + message.fields = + object.fields !== undefined && object.fields !== null + ? FieldList.fromJSON(object.fields) + : undefined; + message.jsonFields = + object.jsonFields !== undefined && object.jsonFields !== null + ? String(object.jsonFields) + : undefined; + return message; + }, + + toJSON(message: DataSchema): unknown { + const obj: any = {}; + message.fields !== undefined && + (obj.fields = message.fields + ? 
FieldList.toJSON(message.fields) + : undefined); + message.jsonFields !== undefined && (obj.jsonFields = message.jsonFields); + return obj; + }, + + fromPartial, I>>( + object: I + ): DataSchema { + const message = { ...baseDataSchema } as DataSchema; + message.fields = + object.fields !== undefined && object.fields !== null + ? FieldList.fromPartial(object.fields) + : undefined; + message.jsonFields = object.jsonFields ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(DataSchema.$type, DataSchema); + +const baseNoAuth: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.NoAuth", +}; + +export const NoAuth = { + $type: "yandex.cloud.datatransfer.v1.endpoint.NoAuth" as const, + + encode(_: NoAuth, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): NoAuth { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseNoAuth } as NoAuth; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): NoAuth { + const message = { ...baseNoAuth } as NoAuth; + return message; + }, + + toJSON(_: NoAuth): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>(_: I): NoAuth { + const message = { ...baseNoAuth } as NoAuth; + return message; + }, +}; + +messageTypeRegistry.set(NoAuth.$type, NoAuth); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | 
Date | Function @@ -498,6 +1118,13 @@ export type Exact = P extends Builtin never >; +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/kafka.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/kafka.ts new file mode 100644 index 00000000..ae6f6c7f --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/kafka.ts @@ -0,0 +1,1008 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + TLSMode, + Secret, + DataTransformationOptions, + NoAuth, +} from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; +import { Parser } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/parsers"; +import { Serializer } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/serializers"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export enum KafkaMechanism { + KAFKA_MECHANISM_UNSPECIFIED = 0, + KAFKA_MECHANISM_SHA256 = 1, + KAFKA_MECHANISM_SHA512 = 2, + UNRECOGNIZED = -1, +} + +export function kafkaMechanismFromJSON(object: any): KafkaMechanism { + switch (object) { + case 0: + case "KAFKA_MECHANISM_UNSPECIFIED": + return KafkaMechanism.KAFKA_MECHANISM_UNSPECIFIED; + case 1: + case "KAFKA_MECHANISM_SHA256": + return KafkaMechanism.KAFKA_MECHANISM_SHA256; + case 2: + case "KAFKA_MECHANISM_SHA512": + return KafkaMechanism.KAFKA_MECHANISM_SHA512; + case -1: + case "UNRECOGNIZED": + default: + return KafkaMechanism.UNRECOGNIZED; + } +} + +export function kafkaMechanismToJSON(object: KafkaMechanism): string { + switch (object) { + case KafkaMechanism.KAFKA_MECHANISM_UNSPECIFIED: + return 
"KAFKA_MECHANISM_UNSPECIFIED"; + case KafkaMechanism.KAFKA_MECHANISM_SHA256: + return "KAFKA_MECHANISM_SHA256"; + case KafkaMechanism.KAFKA_MECHANISM_SHA512: + return "KAFKA_MECHANISM_SHA512"; + default: + return "UNKNOWN"; + } +} + +export interface KafkaConnectionOptions { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaConnectionOptions"; + /** Managed Service for Kafka cluster ID */ + clusterId: string | undefined; + /** Connection options for on-premise Kafka */ + onPremise?: OnPremiseKafka | undefined; +} + +export interface OnPremiseKafka { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseKafka"; + /** Kafka broker URLs */ + brokerUrls: string[]; + /** TLS settings for broker connection. Disabled by default. */ + tlsMode?: TLSMode; + /** Network interface for endpoint. If none will assume public ipv4 */ + subnetId: string; +} + +export interface KafkaAuth { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaAuth"; + /** Authentication with SASL */ + sasl?: KafkaSaslSecurity | undefined; + /** No authentication */ + noAuth?: NoAuth | undefined; +} + +export interface KafkaSaslSecurity { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSaslSecurity"; + /** User name */ + user: string; + /** Password for user */ + password?: Secret; + /** SASL mechanism for authentication */ + mechanism: KafkaMechanism; +} + +export interface KafkaSource { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSource"; + /** Connection settings */ + connection?: KafkaConnectionOptions; + /** Authentication settings */ + auth?: KafkaAuth; + /** Security groups */ + securityGroups: string[]; + /** Full source topic name */ + topicName: string; + /** Data transformation rules */ + transformer?: DataTransformationOptions; + /** Data parsing rules */ + parser?: Parser; +} + +export interface KafkaTarget { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTarget"; + /** Connection settings */ + connection?: KafkaConnectionOptions; + /** Authentication settings */ 
+ auth?: KafkaAuth; + /** Security groups */ + securityGroups: string[]; + /** Target topic settings */ + topicSettings?: KafkaTargetTopicSettings; + /** Data serialization format settings */ + serializer?: Serializer; +} + +export interface KafkaTargetTopicSettings { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopicSettings"; + /** Full topic name */ + topic?: KafkaTargetTopic | undefined; + /** + * Topic prefix + * + * Analogue of the Debezium setting database.server.name. + * Messages will be sent to topic with name ... + */ + topicPrefix: string | undefined; +} + +export interface KafkaTargetTopic { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopic"; + /** Topic name */ + topicName: string; + /** + * Save transactions order + * Not to split events queue into separate per-table queues. + */ + saveTxOrder: boolean; +} + +const baseKafkaConnectionOptions: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaConnectionOptions", +}; + +export const KafkaConnectionOptions = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.KafkaConnectionOptions" as const, + + encode( + message: KafkaConnectionOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== undefined) { + writer.uint32(10).string(message.clusterId); + } + if (message.onPremise !== undefined) { + OnPremiseKafka.encode( + message.onPremise, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): KafkaConnectionOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseKafkaConnectionOptions } as KafkaConnectionOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.onPremise = OnPremiseKafka.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaConnectionOptions { + const message = { ...baseKafkaConnectionOptions } as KafkaConnectionOptions; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? OnPremiseKafka.fromJSON(object.onPremise) + : undefined; + return message; + }, + + toJSON(message: KafkaConnectionOptions): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.onPremise !== undefined && + (obj.onPremise = message.onPremise + ? OnPremiseKafka.toJSON(message.onPremise) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaConnectionOptions { + const message = { ...baseKafkaConnectionOptions } as KafkaConnectionOptions; + message.clusterId = object.clusterId ?? undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? 
OnPremiseKafka.fromPartial(object.onPremise) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(KafkaConnectionOptions.$type, KafkaConnectionOptions); + +const baseOnPremiseKafka: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseKafka", + brokerUrls: "", + subnetId: "", +}; + +export const OnPremiseKafka = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseKafka" as const, + + encode( + message: OnPremiseKafka, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.brokerUrls) { + writer.uint32(10).string(v!); + } + if (message.tlsMode !== undefined) { + TLSMode.encode(message.tlsMode, writer.uint32(42).fork()).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(34).string(message.subnetId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OnPremiseKafka { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOnPremiseKafka } as OnPremiseKafka; + message.brokerUrls = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerUrls.push(reader.string()); + break; + case 5: + message.tlsMode = TLSMode.decode(reader, reader.uint32()); + break; + case 4: + message.subnetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OnPremiseKafka { + const message = { ...baseOnPremiseKafka } as OnPremiseKafka; + message.brokerUrls = (object.brokerUrls ?? []).map((e: any) => String(e)); + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? TLSMode.fromJSON(object.tlsMode) + : undefined; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? 
String(object.subnetId) + : ""; + return message; + }, + + toJSON(message: OnPremiseKafka): unknown { + const obj: any = {}; + if (message.brokerUrls) { + obj.brokerUrls = message.brokerUrls.map((e) => e); + } else { + obj.brokerUrls = []; + } + message.tlsMode !== undefined && + (obj.tlsMode = message.tlsMode + ? TLSMode.toJSON(message.tlsMode) + : undefined); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + return obj; + }, + + fromPartial, I>>( + object: I + ): OnPremiseKafka { + const message = { ...baseOnPremiseKafka } as OnPremiseKafka; + message.brokerUrls = object.brokerUrls?.map((e) => e) || []; + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? TLSMode.fromPartial(object.tlsMode) + : undefined; + message.subnetId = object.subnetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(OnPremiseKafka.$type, OnPremiseKafka); + +const baseKafkaAuth: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaAuth", +}; + +export const KafkaAuth = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaAuth" as const, + + encode( + message: KafkaAuth, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sasl !== undefined) { + KafkaSaslSecurity.encode(message.sasl, writer.uint32(10).fork()).ldelim(); + } + if (message.noAuth !== undefined) { + NoAuth.encode(message.noAuth, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaAuth { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseKafkaAuth } as KafkaAuth; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sasl = KafkaSaslSecurity.decode(reader, reader.uint32()); + break; + case 2: + message.noAuth = NoAuth.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaAuth { + const message = { ...baseKafkaAuth } as KafkaAuth; + message.sasl = + object.sasl !== undefined && object.sasl !== null + ? KafkaSaslSecurity.fromJSON(object.sasl) + : undefined; + message.noAuth = + object.noAuth !== undefined && object.noAuth !== null + ? NoAuth.fromJSON(object.noAuth) + : undefined; + return message; + }, + + toJSON(message: KafkaAuth): unknown { + const obj: any = {}; + message.sasl !== undefined && + (obj.sasl = message.sasl + ? KafkaSaslSecurity.toJSON(message.sasl) + : undefined); + message.noAuth !== undefined && + (obj.noAuth = message.noAuth ? NoAuth.toJSON(message.noAuth) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaAuth { + const message = { ...baseKafkaAuth } as KafkaAuth; + message.sasl = + object.sasl !== undefined && object.sasl !== null + ? KafkaSaslSecurity.fromPartial(object.sasl) + : undefined; + message.noAuth = + object.noAuth !== undefined && object.noAuth !== null + ? 
NoAuth.fromPartial(object.noAuth) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(KafkaAuth.$type, KafkaAuth); + +const baseKafkaSaslSecurity: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSaslSecurity", + user: "", + mechanism: 0, +}; + +export const KafkaSaslSecurity = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSaslSecurity" as const, + + encode( + message: KafkaSaslSecurity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.user !== "") { + writer.uint32(10).string(message.user); + } + if (message.password !== undefined) { + Secret.encode(message.password, writer.uint32(34).fork()).ldelim(); + } + if (message.mechanism !== 0) { + writer.uint32(24).int32(message.mechanism); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaSaslSecurity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseKafkaSaslSecurity } as KafkaSaslSecurity; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.user = reader.string(); + break; + case 4: + message.password = Secret.decode(reader, reader.uint32()); + break; + case 3: + message.mechanism = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaSaslSecurity { + const message = { ...baseKafkaSaslSecurity } as KafkaSaslSecurity; + message.user = + object.user !== undefined && object.user !== null + ? String(object.user) + : ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromJSON(object.password) + : undefined; + message.mechanism = + object.mechanism !== undefined && object.mechanism !== null + ? 
kafkaMechanismFromJSON(object.mechanism) + : 0; + return message; + }, + + toJSON(message: KafkaSaslSecurity): unknown { + const obj: any = {}; + message.user !== undefined && (obj.user = message.user); + message.password !== undefined && + (obj.password = message.password + ? Secret.toJSON(message.password) + : undefined); + message.mechanism !== undefined && + (obj.mechanism = kafkaMechanismToJSON(message.mechanism)); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaSaslSecurity { + const message = { ...baseKafkaSaslSecurity } as KafkaSaslSecurity; + message.user = object.user ?? ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromPartial(object.password) + : undefined; + message.mechanism = object.mechanism ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(KafkaSaslSecurity.$type, KafkaSaslSecurity); + +const baseKafkaSource: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSource", + securityGroups: "", + topicName: "", +}; + +export const KafkaSource = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaSource" as const, + + encode( + message: KafkaSource, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + KafkaConnectionOptions.encode( + message.connection, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.auth !== undefined) { + KafkaAuth.encode(message.auth, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.securityGroups) { + writer.uint32(26).string(v!); + } + if (message.topicName !== "") { + writer.uint32(34).string(message.topicName); + } + if (message.transformer !== undefined) { + DataTransformationOptions.encode( + message.transformer, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.parser !== undefined) { + Parser.encode(message.parser, writer.uint32(58).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaSource { 
+ const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseKafkaSource } as KafkaSource; + message.securityGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connection = KafkaConnectionOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.auth = KafkaAuth.decode(reader, reader.uint32()); + break; + case 3: + message.securityGroups.push(reader.string()); + break; + case 4: + message.topicName = reader.string(); + break; + case 5: + message.transformer = DataTransformationOptions.decode( + reader, + reader.uint32() + ); + break; + case 7: + message.parser = Parser.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaSource { + const message = { ...baseKafkaSource } as KafkaSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? KafkaConnectionOptions.fromJSON(object.connection) + : undefined; + message.auth = + object.auth !== undefined && object.auth !== null + ? KafkaAuth.fromJSON(object.auth) + : undefined; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.topicName = + object.topicName !== undefined && object.topicName !== null + ? String(object.topicName) + : ""; + message.transformer = + object.transformer !== undefined && object.transformer !== null + ? DataTransformationOptions.fromJSON(object.transformer) + : undefined; + message.parser = + object.parser !== undefined && object.parser !== null + ? Parser.fromJSON(object.parser) + : undefined; + return message; + }, + + toJSON(message: KafkaSource): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? 
KafkaConnectionOptions.toJSON(message.connection) + : undefined); + message.auth !== undefined && + (obj.auth = message.auth ? KafkaAuth.toJSON(message.auth) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.topicName !== undefined && (obj.topicName = message.topicName); + message.transformer !== undefined && + (obj.transformer = message.transformer + ? DataTransformationOptions.toJSON(message.transformer) + : undefined); + message.parser !== undefined && + (obj.parser = message.parser ? Parser.toJSON(message.parser) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaSource { + const message = { ...baseKafkaSource } as KafkaSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? KafkaConnectionOptions.fromPartial(object.connection) + : undefined; + message.auth = + object.auth !== undefined && object.auth !== null + ? KafkaAuth.fromPartial(object.auth) + : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.topicName = object.topicName ?? ""; + message.transformer = + object.transformer !== undefined && object.transformer !== null + ? DataTransformationOptions.fromPartial(object.transformer) + : undefined; + message.parser = + object.parser !== undefined && object.parser !== null + ? 
Parser.fromPartial(object.parser) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(KafkaSource.$type, KafkaSource); + +const baseKafkaTarget: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTarget", + securityGroups: "", +}; + +export const KafkaTarget = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTarget" as const, + + encode( + message: KafkaTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + KafkaConnectionOptions.encode( + message.connection, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.auth !== undefined) { + KafkaAuth.encode(message.auth, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.securityGroups) { + writer.uint32(26).string(v!); + } + if (message.topicSettings !== undefined) { + KafkaTargetTopicSettings.encode( + message.topicSettings, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.serializer !== undefined) { + Serializer.encode(message.serializer, writer.uint32(66).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseKafkaTarget } as KafkaTarget; + message.securityGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connection = KafkaConnectionOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.auth = KafkaAuth.decode(reader, reader.uint32()); + break; + case 3: + message.securityGroups.push(reader.string()); + break; + case 7: + message.topicSettings = KafkaTargetTopicSettings.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.serializer = Serializer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaTarget { + const message = { ...baseKafkaTarget } as KafkaTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? KafkaConnectionOptions.fromJSON(object.connection) + : undefined; + message.auth = + object.auth !== undefined && object.auth !== null + ? KafkaAuth.fromJSON(object.auth) + : undefined; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.topicSettings = + object.topicSettings !== undefined && object.topicSettings !== null + ? KafkaTargetTopicSettings.fromJSON(object.topicSettings) + : undefined; + message.serializer = + object.serializer !== undefined && object.serializer !== null + ? Serializer.fromJSON(object.serializer) + : undefined; + return message; + }, + + toJSON(message: KafkaTarget): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? KafkaConnectionOptions.toJSON(message.connection) + : undefined); + message.auth !== undefined && + (obj.auth = message.auth ? 
KafkaAuth.toJSON(message.auth) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.topicSettings !== undefined && + (obj.topicSettings = message.topicSettings + ? KafkaTargetTopicSettings.toJSON(message.topicSettings) + : undefined); + message.serializer !== undefined && + (obj.serializer = message.serializer + ? Serializer.toJSON(message.serializer) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaTarget { + const message = { ...baseKafkaTarget } as KafkaTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? KafkaConnectionOptions.fromPartial(object.connection) + : undefined; + message.auth = + object.auth !== undefined && object.auth !== null + ? KafkaAuth.fromPartial(object.auth) + : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.topicSettings = + object.topicSettings !== undefined && object.topicSettings !== null + ? KafkaTargetTopicSettings.fromPartial(object.topicSettings) + : undefined; + message.serializer = + object.serializer !== undefined && object.serializer !== null + ? 
Serializer.fromPartial(object.serializer) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(KafkaTarget.$type, KafkaTarget); + +const baseKafkaTargetTopicSettings: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopicSettings", +}; + +export const KafkaTargetTopicSettings = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopicSettings" as const, + + encode( + message: KafkaTargetTopicSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.topic !== undefined) { + KafkaTargetTopic.encode(message.topic, writer.uint32(10).fork()).ldelim(); + } + if (message.topicPrefix !== undefined) { + writer.uint32(18).string(message.topicPrefix); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): KafkaTargetTopicSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseKafkaTargetTopicSettings, + } as KafkaTargetTopicSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.topic = KafkaTargetTopic.decode(reader, reader.uint32()); + break; + case 2: + message.topicPrefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaTargetTopicSettings { + const message = { + ...baseKafkaTargetTopicSettings, + } as KafkaTargetTopicSettings; + message.topic = + object.topic !== undefined && object.topic !== null + ? KafkaTargetTopic.fromJSON(object.topic) + : undefined; + message.topicPrefix = + object.topicPrefix !== undefined && object.topicPrefix !== null + ? String(object.topicPrefix) + : undefined; + return message; + }, + + toJSON(message: KafkaTargetTopicSettings): unknown { + const obj: any = {}; + message.topic !== undefined && + (obj.topic = message.topic + ? 
KafkaTargetTopic.toJSON(message.topic) + : undefined); + message.topicPrefix !== undefined && + (obj.topicPrefix = message.topicPrefix); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaTargetTopicSettings { + const message = { + ...baseKafkaTargetTopicSettings, + } as KafkaTargetTopicSettings; + message.topic = + object.topic !== undefined && object.topic !== null + ? KafkaTargetTopic.fromPartial(object.topic) + : undefined; + message.topicPrefix = object.topicPrefix ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + KafkaTargetTopicSettings.$type, + KafkaTargetTopicSettings +); + +const baseKafkaTargetTopic: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopic", + topicName: "", + saveTxOrder: false, +}; + +export const KafkaTargetTopic = { + $type: "yandex.cloud.datatransfer.v1.endpoint.KafkaTargetTopic" as const, + + encode( + message: KafkaTargetTopic, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.topicName !== "") { + writer.uint32(10).string(message.topicName); + } + if (message.saveTxOrder === true) { + writer.uint32(16).bool(message.saveTxOrder); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaTargetTopic { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseKafkaTargetTopic } as KafkaTargetTopic; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.topicName = reader.string(); + break; + case 2: + message.saveTxOrder = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaTargetTopic { + const message = { ...baseKafkaTargetTopic } as KafkaTargetTopic; + message.topicName = + object.topicName !== undefined && object.topicName !== null + ? 
String(object.topicName) + : ""; + message.saveTxOrder = + object.saveTxOrder !== undefined && object.saveTxOrder !== null + ? Boolean(object.saveTxOrder) + : false; + return message; + }, + + toJSON(message: KafkaTargetTopic): unknown { + const obj: any = {}; + message.topicName !== undefined && (obj.topicName = message.topicName); + message.saveTxOrder !== undefined && + (obj.saveTxOrder = message.saveTxOrder); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaTargetTopic { + const message = { ...baseKafkaTargetTopic } as KafkaTargetTopic; + message.topicName = object.topicName ?? ""; + message.saveTxOrder = object.saveTxOrder ?? false; + return message; + }, +}; + +messageTypeRegistry.set(KafkaTargetTopic.$type, KafkaTargetTopic); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts index 678ea3e0..124522a2 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts @@ -24,8 +24,11 @@ export interface MongoConnectionOptions { $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnectionOptions"; mdbClusterId: string | undefined; onPremise?: OnPremiseMongo | undefined; + /** User name */ user: string; + /** Password for user */ password?: Secret; + /** Database name associated with the credentials */ authSource: string; } @@ -46,8 +49,17 @@ export interface MongoSource { subnetId: string; /** Security groups */ securityGroups: string[]; + /** + * List of collections for replication. Empty list implies replication of all + * tables on the deployment. Allowed to use * as collection name. + */ collections: MongoCollection[]; + /** + * List of forbidden collections for replication. Allowed to use * as collection + * name for forbid all collections of concrete schema. 
+ */ excludedCollections: MongoCollection[]; + /** Read mode for mongo client */ secondaryPreferredMode: boolean; } @@ -57,6 +69,7 @@ export interface MongoTarget { subnetId: string; /** Security groups */ securityGroups: string[]; + /** Database name */ database: string; cleanupPolicy: CleanupPolicy; } diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts index 39751b0c..56c36046 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts @@ -18,39 +18,19 @@ export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; export interface OnPremiseMysql { $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseMysql"; hosts: string[]; - /** - * Database port - * - * Default: 3306. - */ + /** Database port */ port: number; - /** - * TLS mode - * - * TLS settings for server connection. Disabled by default. - */ + /** TLS settings for server connection. Disabled by default. */ tlsMode?: TLSMode; - /** - * Network interface for endpoint - * - * Default: public IPv4. - */ + /** Network interface for endpoint. If none will assume public ipv4 */ subnetId: string; } export interface MysqlConnection { $type: "yandex.cloud.datatransfer.v1.endpoint.MysqlConnection"; - /** - * Managed cluster - * - * Managed Service for MySQL cluster ID - */ + /** Managed Service for MySQL cluster ID */ mdbClusterId: string | undefined; - /** - * On-premise - * - * Connection options for on-premise MySQL - */ + /** Connection options for on-premise MySQL */ onPremise?: OnPremiseMysql | undefined; } @@ -74,15 +54,12 @@ export interface MysqlObjectTransferSettings { * CREATE TRIGGER ... 
*/ trigger: ObjectTransferStage; + tables: ObjectTransferStage; } export interface MysqlSource { $type: "yandex.cloud.datatransfer.v1.endpoint.MysqlSource"; - /** - * Connection settings - * - * Database connection settings - */ + /** Database connection settings */ connection?: MysqlConnection; /** Security groups */ securityGroups: string[]; @@ -100,17 +77,9 @@ export interface MysqlSource { * __tm_gtid_keeper). */ serviceDatabase: string; - /** - * Username - * - * User for database access. - */ + /** User for database access. */ user: string; - /** - * Password - * - * Password for database access. - */ + /** Password for database access. */ password?: Secret; includeTablesRegex: string[]; excludeTablesRegex: string[]; @@ -131,11 +100,7 @@ export interface MysqlSource { export interface MysqlTarget { $type: "yandex.cloud.datatransfer.v1.endpoint.MysqlTarget"; - /** - * Connection settings - * - * Database connection settings - */ + /** Database connection settings */ connection?: MysqlConnection; /** Security groups */ securityGroups: string[]; @@ -147,23 +112,11 @@ export interface MysqlTarget { * schema for service table. */ database: string; - /** - * Username - * - * User for database access. - */ + /** User for database access. */ user: string; - /** - * Password - * - * Password for database access. - */ + /** Password for database access. */ password?: Secret; - /** - * sql_mode - * - * Default: NO_AUTO_VALUE_ON_ZERO,NO_DIR_IN_CREATE,NO_ENGINE_SUBSTITUTION. - */ + /** Default: NO_AUTO_VALUE_ON_ZERO,NO_DIR_IN_CREATE,NO_ENGINE_SUBSTITUTION. 
*/ sqlMode: string; /** * Disable constraints checks @@ -391,6 +344,7 @@ const baseMysqlObjectTransferSettings: object = { view: 0, routine: 0, trigger: 0, + tables: 0, }; export const MysqlObjectTransferSettings = { @@ -410,6 +364,9 @@ export const MysqlObjectTransferSettings = { if (message.trigger !== 0) { writer.uint32(24).int32(message.trigger); } + if (message.tables !== 0) { + writer.uint32(32).int32(message.tables); + } return writer; }, @@ -434,6 +391,9 @@ export const MysqlObjectTransferSettings = { case 3: message.trigger = reader.int32() as any; break; + case 4: + message.tables = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -458,6 +418,10 @@ export const MysqlObjectTransferSettings = { object.trigger !== undefined && object.trigger !== null ? objectTransferStageFromJSON(object.trigger) : 0; + message.tables = + object.tables !== undefined && object.tables !== null + ? objectTransferStageFromJSON(object.tables) + : 0; return message; }, @@ -469,6 +433,8 @@ export const MysqlObjectTransferSettings = { (obj.routine = objectTransferStageToJSON(message.routine)); message.trigger !== undefined && (obj.trigger = objectTransferStageToJSON(message.trigger)); + message.tables !== undefined && + (obj.tables = objectTransferStageToJSON(message.tables)); return obj; }, @@ -481,6 +447,7 @@ export const MysqlObjectTransferSettings = { message.view = object.view ?? 0; message.routine = object.routine ?? 0; message.trigger = object.trigger ?? 0; + message.tables = object.tables ?? 
0; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/parsers.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/parsers.ts new file mode 100644 index 00000000..40c903be --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/parsers.ts @@ -0,0 +1,404 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { DataSchema } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export interface Parser { + $type: "yandex.cloud.datatransfer.v1.endpoint.Parser"; + jsonParser?: GenericParserCommon | undefined; + auditTrailsV1Parser?: AuditTrailsV1Parser | undefined; + cloudLoggingParser?: CloudLoggingParser | undefined; + tskvParser?: GenericParserCommon | undefined; +} + +export interface GenericParserCommon { + $type: "yandex.cloud.datatransfer.v1.endpoint.GenericParserCommon"; + dataSchema?: DataSchema; + /** Allow null keys, if no - null keys will be putted to unparsed data */ + nullKeysAllowed: boolean; + /** Will add _rest column for all unknown fields */ + addRestColumn: boolean; +} + +export interface AuditTrailsV1Parser { + $type: "yandex.cloud.datatransfer.v1.endpoint.AuditTrailsV1Parser"; +} + +export interface CloudLoggingParser { + $type: "yandex.cloud.datatransfer.v1.endpoint.CloudLoggingParser"; +} + +const baseParser: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.Parser", +}; + +export const Parser = { + $type: "yandex.cloud.datatransfer.v1.endpoint.Parser" as const, + + encode( + message: Parser, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.jsonParser !== undefined) { + GenericParserCommon.encode( + message.jsonParser, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.auditTrailsV1Parser !== undefined) { + AuditTrailsV1Parser.encode( + 
message.auditTrailsV1Parser, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.cloudLoggingParser !== undefined) { + CloudLoggingParser.encode( + message.cloudLoggingParser, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tskvParser !== undefined) { + GenericParserCommon.encode( + message.tskvParser, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Parser { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseParser } as Parser; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.jsonParser = GenericParserCommon.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.auditTrailsV1Parser = AuditTrailsV1Parser.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.cloudLoggingParser = CloudLoggingParser.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.tskvParser = GenericParserCommon.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Parser { + const message = { ...baseParser } as Parser; + message.jsonParser = + object.jsonParser !== undefined && object.jsonParser !== null + ? GenericParserCommon.fromJSON(object.jsonParser) + : undefined; + message.auditTrailsV1Parser = + object.auditTrailsV1Parser !== undefined && + object.auditTrailsV1Parser !== null + ? AuditTrailsV1Parser.fromJSON(object.auditTrailsV1Parser) + : undefined; + message.cloudLoggingParser = + object.cloudLoggingParser !== undefined && + object.cloudLoggingParser !== null + ? CloudLoggingParser.fromJSON(object.cloudLoggingParser) + : undefined; + message.tskvParser = + object.tskvParser !== undefined && object.tskvParser !== null + ? 
GenericParserCommon.fromJSON(object.tskvParser) + : undefined; + return message; + }, + + toJSON(message: Parser): unknown { + const obj: any = {}; + message.jsonParser !== undefined && + (obj.jsonParser = message.jsonParser + ? GenericParserCommon.toJSON(message.jsonParser) + : undefined); + message.auditTrailsV1Parser !== undefined && + (obj.auditTrailsV1Parser = message.auditTrailsV1Parser + ? AuditTrailsV1Parser.toJSON(message.auditTrailsV1Parser) + : undefined); + message.cloudLoggingParser !== undefined && + (obj.cloudLoggingParser = message.cloudLoggingParser + ? CloudLoggingParser.toJSON(message.cloudLoggingParser) + : undefined); + message.tskvParser !== undefined && + (obj.tskvParser = message.tskvParser + ? GenericParserCommon.toJSON(message.tskvParser) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Parser { + const message = { ...baseParser } as Parser; + message.jsonParser = + object.jsonParser !== undefined && object.jsonParser !== null + ? GenericParserCommon.fromPartial(object.jsonParser) + : undefined; + message.auditTrailsV1Parser = + object.auditTrailsV1Parser !== undefined && + object.auditTrailsV1Parser !== null + ? AuditTrailsV1Parser.fromPartial(object.auditTrailsV1Parser) + : undefined; + message.cloudLoggingParser = + object.cloudLoggingParser !== undefined && + object.cloudLoggingParser !== null + ? CloudLoggingParser.fromPartial(object.cloudLoggingParser) + : undefined; + message.tskvParser = + object.tskvParser !== undefined && object.tskvParser !== null + ? 
GenericParserCommon.fromPartial(object.tskvParser) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Parser.$type, Parser); + +const baseGenericParserCommon: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.GenericParserCommon", + nullKeysAllowed: false, + addRestColumn: false, +}; + +export const GenericParserCommon = { + $type: "yandex.cloud.datatransfer.v1.endpoint.GenericParserCommon" as const, + + encode( + message: GenericParserCommon, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dataSchema !== undefined) { + DataSchema.encode(message.dataSchema, writer.uint32(10).fork()).ldelim(); + } + if (message.nullKeysAllowed === true) { + writer.uint32(16).bool(message.nullKeysAllowed); + } + if (message.addRestColumn === true) { + writer.uint32(24).bool(message.addRestColumn); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenericParserCommon { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGenericParserCommon } as GenericParserCommon; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataSchema = DataSchema.decode(reader, reader.uint32()); + break; + case 2: + message.nullKeysAllowed = reader.bool(); + break; + case 3: + message.addRestColumn = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GenericParserCommon { + const message = { ...baseGenericParserCommon } as GenericParserCommon; + message.dataSchema = + object.dataSchema !== undefined && object.dataSchema !== null + ? DataSchema.fromJSON(object.dataSchema) + : undefined; + message.nullKeysAllowed = + object.nullKeysAllowed !== undefined && object.nullKeysAllowed !== null + ? 
Boolean(object.nullKeysAllowed) + : false; + message.addRestColumn = + object.addRestColumn !== undefined && object.addRestColumn !== null + ? Boolean(object.addRestColumn) + : false; + return message; + }, + + toJSON(message: GenericParserCommon): unknown { + const obj: any = {}; + message.dataSchema !== undefined && + (obj.dataSchema = message.dataSchema + ? DataSchema.toJSON(message.dataSchema) + : undefined); + message.nullKeysAllowed !== undefined && + (obj.nullKeysAllowed = message.nullKeysAllowed); + message.addRestColumn !== undefined && + (obj.addRestColumn = message.addRestColumn); + return obj; + }, + + fromPartial, I>>( + object: I + ): GenericParserCommon { + const message = { ...baseGenericParserCommon } as GenericParserCommon; + message.dataSchema = + object.dataSchema !== undefined && object.dataSchema !== null + ? DataSchema.fromPartial(object.dataSchema) + : undefined; + message.nullKeysAllowed = object.nullKeysAllowed ?? false; + message.addRestColumn = object.addRestColumn ?? false; + return message; + }, +}; + +messageTypeRegistry.set(GenericParserCommon.$type, GenericParserCommon); + +const baseAuditTrailsV1Parser: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.AuditTrailsV1Parser", +}; + +export const AuditTrailsV1Parser = { + $type: "yandex.cloud.datatransfer.v1.endpoint.AuditTrailsV1Parser" as const, + + encode( + _: AuditTrailsV1Parser, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AuditTrailsV1Parser { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAuditTrailsV1Parser } as AuditTrailsV1Parser; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): AuditTrailsV1Parser { + const message = { ...baseAuditTrailsV1Parser } as AuditTrailsV1Parser; + return message; + }, + + toJSON(_: AuditTrailsV1Parser): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): AuditTrailsV1Parser { + const message = { ...baseAuditTrailsV1Parser } as AuditTrailsV1Parser; + return message; + }, +}; + +messageTypeRegistry.set(AuditTrailsV1Parser.$type, AuditTrailsV1Parser); + +const baseCloudLoggingParser: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.CloudLoggingParser", +}; + +export const CloudLoggingParser = { + $type: "yandex.cloud.datatransfer.v1.endpoint.CloudLoggingParser" as const, + + encode( + _: CloudLoggingParser, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CloudLoggingParser { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCloudLoggingParser } as CloudLoggingParser; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): CloudLoggingParser { + const message = { ...baseCloudLoggingParser } as CloudLoggingParser; + return message; + }, + + toJSON(_: CloudLoggingParser): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): CloudLoggingParser { + const message = { ...baseCloudLoggingParser } as CloudLoggingParser; + return message; + }, +}; + +messageTypeRegistry.set(CloudLoggingParser.$type, CloudLoggingParser); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts index 044424d6..e070f42f 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts @@ -29,6 +29,8 @@ export interface PostgresObjectTransferSettings { * CREATE SEQUENCE ... OWNED BY ... */ sequenceOwnedBy: ObjectTransferStage; + /** */ + sequenceSet: ObjectTransferStage; /** * Tables * @@ -71,6 +73,12 @@ export interface PostgresObjectTransferSettings { * CREATE VIEW ... */ view: ObjectTransferStage; + /** + * Materialized views + * + * CREATE MATERIALIZED VIEW ... 
+ */ + materializedView: ObjectTransferStage; /** * Functions * @@ -113,76 +121,38 @@ export interface PostgresObjectTransferSettings { * CREATE CAST ... */ cast: ObjectTransferStage; - /** - * Materialized views - * - * CREATE MATERIALIZED VIEW ... - */ - materializedView: ObjectTransferStage; } export interface OnPremisePostgres { $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremisePostgres"; hosts: string[]; - /** - * Database port - * - * Will be used if the cluster ID is not specified. Default: 6432. - */ + /** Will be used if the cluster ID is not specified. */ port: number; - /** - * TLS mode - * - * TLS settings for server connection. Disabled by default. - */ + /** TLS settings for server connection. Disabled by default. */ tlsMode?: TLSMode; - /** - * Network interface for endpoint - * - * Default: public IPv4. - */ + /** Network interface for endpoint. If none will assume public ipv4 */ subnetId: string; } export interface PostgresConnection { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresConnection"; - /** - * Managed cluster - * - * Managed Service for PostgreSQL cluster ID - */ + /** Managed Service for PostgreSQL cluster ID */ mdbClusterId: string | undefined; - /** - * On-premise - * - * Connection options for on-premise PostgreSQL - */ + /** Connection options for on-premise PostgreSQL */ onPremise?: OnPremisePostgres | undefined; } export interface PostgresSource { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresSource"; - /** - * Connection settings - * - * Database connection settings - */ + /** Database connection settings */ connection?: PostgresConnection; /** Security groups */ securityGroups: string[]; /** Database name */ database: string; - /** - * Username - * - * User for database access. - */ + /** User for database access. */ user: string; - /** - * Password - * - * Password for database access. - */ + /** Password for database access. 
*/ password?: Secret; /** * Included tables @@ -199,57 +169,34 @@ export interface PostgresSource { */ excludeTables: string[]; /** - * Maximum WAL size for the replication slot - * - * Maximum WAL size held by the replication slot. Exceeding this limit will result - * in a replication failure and deletion of the replication slot. Unlimited by - * default. + * Maximum lag of replication slot (in bytes); after exceeding this limit + * replication will be aborted. */ slotByteLagLimit: number; /** - * Database schema for service tables - * - * Default: public. Here created technical tables (__consumer_keeper, - * __data_transfer_mole_finder). + * Database schema for service tables (__consumer_keeper, + * __data_transfer_mole_finder). Default is public */ serviceSchema: string; - /** - * Schema migration - * - * Select database objects to be transferred during activation or deactivation. - */ + /** Select database objects to be transferred during activation or deactivation. */ objectTransferSettings?: PostgresObjectTransferSettings; } export interface PostgresTarget { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresTarget"; - /** - * Connection settings - * - * Database connection settings - */ + /** Database connection settings */ connection?: PostgresConnection; /** Security groups */ securityGroups: string[]; /** Database name */ database: string; - /** - * Username - * - * User for database access. - */ + /** User for database access. */ user: string; - /** - * Password - * - * Password for database access. - */ + /** Password for database access. */ password?: Secret; /** - * Cleanup policy - * * Cleanup policy for activate, reactivate and reupload processes. Default is - * DISABLED. + * truncate. 
*/ cleanupPolicy: CleanupPolicy; } @@ -258,6 +205,7 @@ const basePostgresObjectTransferSettings: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresObjectTransferSettings", sequence: 0, sequenceOwnedBy: 0, + sequenceSet: 0, table: 0, primaryKey: 0, fkConstraint: 0, @@ -265,6 +213,7 @@ const basePostgresObjectTransferSettings: object = { constraint: 0, index: 0, view: 0, + materializedView: 0, function: 0, trigger: 0, type: 0, @@ -272,7 +221,6 @@ const basePostgresObjectTransferSettings: object = { collation: 0, policy: 0, cast: 0, - materializedView: 0, }; export const PostgresObjectTransferSettings = { @@ -289,6 +237,9 @@ export const PostgresObjectTransferSettings = { if (message.sequenceOwnedBy !== 0) { writer.uint32(16).int32(message.sequenceOwnedBy); } + if (message.sequenceSet !== 0) { + writer.uint32(144).int32(message.sequenceSet); + } if (message.table !== 0) { writer.uint32(24).int32(message.table); } @@ -310,6 +261,9 @@ export const PostgresObjectTransferSettings = { if (message.view !== 0) { writer.uint32(72).int32(message.view); } + if (message.materializedView !== 0) { + writer.uint32(136).int32(message.materializedView); + } if (message.function !== 0) { writer.uint32(80).int32(message.function); } @@ -331,9 +285,6 @@ export const PostgresObjectTransferSettings = { if (message.cast !== 0) { writer.uint32(128).int32(message.cast); } - if (message.materializedView !== 0) { - writer.uint32(136).int32(message.materializedView); - } return writer; }, @@ -355,6 +306,9 @@ export const PostgresObjectTransferSettings = { case 2: message.sequenceOwnedBy = reader.int32() as any; break; + case 18: + message.sequenceSet = reader.int32() as any; + break; case 3: message.table = reader.int32() as any; break; @@ -376,6 +330,9 @@ export const PostgresObjectTransferSettings = { case 9: message.view = reader.int32() as any; break; + case 17: + message.materializedView = reader.int32() as any; + break; case 10: message.function = reader.int32() as any; 
break; @@ -397,9 +354,6 @@ export const PostgresObjectTransferSettings = { case 16: message.cast = reader.int32() as any; break; - case 17: - message.materializedView = reader.int32() as any; - break; default: reader.skipType(tag & 7); break; @@ -420,6 +374,10 @@ export const PostgresObjectTransferSettings = { object.sequenceOwnedBy !== undefined && object.sequenceOwnedBy !== null ? objectTransferStageFromJSON(object.sequenceOwnedBy) : 0; + message.sequenceSet = + object.sequenceSet !== undefined && object.sequenceSet !== null + ? objectTransferStageFromJSON(object.sequenceSet) + : 0; message.table = object.table !== undefined && object.table !== null ? objectTransferStageFromJSON(object.table) @@ -448,6 +406,10 @@ export const PostgresObjectTransferSettings = { object.view !== undefined && object.view !== null ? objectTransferStageFromJSON(object.view) : 0; + message.materializedView = + object.materializedView !== undefined && object.materializedView !== null + ? objectTransferStageFromJSON(object.materializedView) + : 0; message.function = object.function !== undefined && object.function !== null ? objectTransferStageFromJSON(object.function) @@ -476,10 +438,6 @@ export const PostgresObjectTransferSettings = { object.cast !== undefined && object.cast !== null ? objectTransferStageFromJSON(object.cast) : 0; - message.materializedView = - object.materializedView !== undefined && object.materializedView !== null - ? 
objectTransferStageFromJSON(object.materializedView) - : 0; return message; }, @@ -491,6 +449,8 @@ export const PostgresObjectTransferSettings = { (obj.sequenceOwnedBy = objectTransferStageToJSON( message.sequenceOwnedBy )); + message.sequenceSet !== undefined && + (obj.sequenceSet = objectTransferStageToJSON(message.sequenceSet)); message.table !== undefined && (obj.table = objectTransferStageToJSON(message.table)); message.primaryKey !== undefined && @@ -505,6 +465,10 @@ export const PostgresObjectTransferSettings = { (obj.index = objectTransferStageToJSON(message.index)); message.view !== undefined && (obj.view = objectTransferStageToJSON(message.view)); + message.materializedView !== undefined && + (obj.materializedView = objectTransferStageToJSON( + message.materializedView + )); message.function !== undefined && (obj.function = objectTransferStageToJSON(message.function)); message.trigger !== undefined && @@ -519,10 +483,6 @@ export const PostgresObjectTransferSettings = { (obj.policy = objectTransferStageToJSON(message.policy)); message.cast !== undefined && (obj.cast = objectTransferStageToJSON(message.cast)); - message.materializedView !== undefined && - (obj.materializedView = objectTransferStageToJSON( - message.materializedView - )); return obj; }, @@ -534,6 +494,7 @@ export const PostgresObjectTransferSettings = { } as PostgresObjectTransferSettings; message.sequence = object.sequence ?? 0; message.sequenceOwnedBy = object.sequenceOwnedBy ?? 0; + message.sequenceSet = object.sequenceSet ?? 0; message.table = object.table ?? 0; message.primaryKey = object.primaryKey ?? 0; message.fkConstraint = object.fkConstraint ?? 0; @@ -541,6 +502,7 @@ export const PostgresObjectTransferSettings = { message.constraint = object.constraint ?? 0; message.index = object.index ?? 0; message.view = object.view ?? 0; + message.materializedView = object.materializedView ?? 0; message.function = object.function ?? 0; message.trigger = object.trigger ?? 
0; message.type = object.type ?? 0; @@ -548,7 +510,6 @@ export const PostgresObjectTransferSettings = { message.collation = object.collation ?? 0; message.policy = object.policy ?? 0; message.cast = object.cast ?? 0; - message.materializedView = object.materializedView ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/serializers.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/serializers.ts new file mode 100644 index 00000000..a4b383af --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/serializers.ts @@ -0,0 +1,450 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export interface SerializerAuto { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerAuto"; +} + +export interface SerializerJSON { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerJSON"; +} + +export interface DebeziumSerializerParameter { + $type: "yandex.cloud.datatransfer.v1.endpoint.DebeziumSerializerParameter"; + /** Name of the serializer parameter */ + key: string; + /** Value of the serializer parameter */ + value: string; +} + +export interface SerializerDebezium { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerDebezium"; + /** Settings of sterilization parameters as key-value pairs */ + serializerParameters: DebeziumSerializerParameter[]; +} + +/** Data serialization format */ +export interface Serializer { + $type: "yandex.cloud.datatransfer.v1.endpoint.Serializer"; + /** Select the serialization format automatically */ + serializerAuto?: SerializerAuto | undefined; + /** Serialize data in json format */ + serializerJson?: SerializerJSON | undefined; + /** Serialize data in debezium format */ + serializerDebezium?: SerializerDebezium | undefined; +} + +const baseSerializerAuto: object = { + $type: 
"yandex.cloud.datatransfer.v1.endpoint.SerializerAuto", +}; + +export const SerializerAuto = { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerAuto" as const, + + encode( + _: SerializerAuto, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SerializerAuto { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSerializerAuto } as SerializerAuto; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): SerializerAuto { + const message = { ...baseSerializerAuto } as SerializerAuto; + return message; + }, + + toJSON(_: SerializerAuto): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): SerializerAuto { + const message = { ...baseSerializerAuto } as SerializerAuto; + return message; + }, +}; + +messageTypeRegistry.set(SerializerAuto.$type, SerializerAuto); + +const baseSerializerJSON: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerJSON", +}; + +export const SerializerJSON = { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerJSON" as const, + + encode( + _: SerializerJSON, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SerializerJSON { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSerializerJSON } as SerializerJSON; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): SerializerJSON { + const message = { ...baseSerializerJSON } as SerializerJSON; + return message; + }, + + toJSON(_: SerializerJSON): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): SerializerJSON { + const message = { ...baseSerializerJSON } as SerializerJSON; + return message; + }, +}; + +messageTypeRegistry.set(SerializerJSON.$type, SerializerJSON); + +const baseDebeziumSerializerParameter: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.DebeziumSerializerParameter", + key: "", + value: "", +}; + +export const DebeziumSerializerParameter = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.DebeziumSerializerParameter" as const, + + encode( + message: DebeziumSerializerParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DebeziumSerializerParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDebeziumSerializerParameter, + } as DebeziumSerializerParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DebeziumSerializerParameter { + const message = { + ...baseDebeziumSerializerParameter, + } as DebeziumSerializerParameter; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: DebeziumSerializerParameter): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): DebeziumSerializerParameter { + const message = { + ...baseDebeziumSerializerParameter, + } as DebeziumSerializerParameter; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DebeziumSerializerParameter.$type, + DebeziumSerializerParameter +); + +const baseSerializerDebezium: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerDebezium", +}; + +export const SerializerDebezium = { + $type: "yandex.cloud.datatransfer.v1.endpoint.SerializerDebezium" as const, + + encode( + message: SerializerDebezium, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.serializerParameters) { + DebeziumSerializerParameter.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SerializerDebezium { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSerializerDebezium } as SerializerDebezium; + message.serializerParameters = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serializerParameters.push( + DebeziumSerializerParameter.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SerializerDebezium { + const message = { ...baseSerializerDebezium } as SerializerDebezium; + message.serializerParameters = (object.serializerParameters ?? []).map( + (e: any) => DebeziumSerializerParameter.fromJSON(e) + ); + return message; + }, + + toJSON(message: SerializerDebezium): unknown { + const obj: any = {}; + if (message.serializerParameters) { + obj.serializerParameters = message.serializerParameters.map((e) => + e ? DebeziumSerializerParameter.toJSON(e) : undefined + ); + } else { + obj.serializerParameters = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): SerializerDebezium { + const message = { ...baseSerializerDebezium } as SerializerDebezium; + message.serializerParameters = + object.serializerParameters?.map((e) => + DebeziumSerializerParameter.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(SerializerDebezium.$type, SerializerDebezium); + +const baseSerializer: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.Serializer", +}; + +export const Serializer = { + $type: "yandex.cloud.datatransfer.v1.endpoint.Serializer" as const, + + encode( + message: Serializer, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serializerAuto !== undefined) { + SerializerAuto.encode( + message.serializerAuto, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.serializerJson !== undefined) { + SerializerJSON.encode( + message.serializerJson, + 
writer.uint32(18).fork() + ).ldelim(); + } + if (message.serializerDebezium !== undefined) { + SerializerDebezium.encode( + message.serializerDebezium, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Serializer { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSerializer } as Serializer; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serializerAuto = SerializerAuto.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.serializerJson = SerializerJSON.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.serializerDebezium = SerializerDebezium.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Serializer { + const message = { ...baseSerializer } as Serializer; + message.serializerAuto = + object.serializerAuto !== undefined && object.serializerAuto !== null + ? SerializerAuto.fromJSON(object.serializerAuto) + : undefined; + message.serializerJson = + object.serializerJson !== undefined && object.serializerJson !== null + ? SerializerJSON.fromJSON(object.serializerJson) + : undefined; + message.serializerDebezium = + object.serializerDebezium !== undefined && + object.serializerDebezium !== null + ? SerializerDebezium.fromJSON(object.serializerDebezium) + : undefined; + return message; + }, + + toJSON(message: Serializer): unknown { + const obj: any = {}; + message.serializerAuto !== undefined && + (obj.serializerAuto = message.serializerAuto + ? SerializerAuto.toJSON(message.serializerAuto) + : undefined); + message.serializerJson !== undefined && + (obj.serializerJson = message.serializerJson + ? 
SerializerJSON.toJSON(message.serializerJson) + : undefined); + message.serializerDebezium !== undefined && + (obj.serializerDebezium = message.serializerDebezium + ? SerializerDebezium.toJSON(message.serializerDebezium) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Serializer { + const message = { ...baseSerializer } as Serializer; + message.serializerAuto = + object.serializerAuto !== undefined && object.serializerAuto !== null + ? SerializerAuto.fromPartial(object.serializerAuto) + : undefined; + message.serializerJson = + object.serializerJson !== undefined && object.serializerJson !== null + ? SerializerJSON.fromPartial(object.serializerJson) + : undefined; + message.serializerDebezium = + object.serializerDebezium !== undefined && + object.serializerDebezium !== null + ? SerializerDebezium.fromPartial(object.serializerDebezium) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Serializer.$type, Serializer); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/ydb.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/ydb.ts new file mode 100644 index 00000000..d36bf1c4 --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/ydb.ts @@ -0,0 +1,419 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export enum YdbCleanupPolicy { + YDB_CLEANUP_POLICY_UNSPECIFIED = 0, + YDB_CLEANUP_POLICY_DISABLED = 1, + YDB_CLEANUP_POLICY_DROP = 2, + UNRECOGNIZED = -1, +} + +export function ydbCleanupPolicyFromJSON(object: any): YdbCleanupPolicy { + switch (object) { + case 0: + case "YDB_CLEANUP_POLICY_UNSPECIFIED": + return YdbCleanupPolicy.YDB_CLEANUP_POLICY_UNSPECIFIED; + case 1: + case "YDB_CLEANUP_POLICY_DISABLED": + return YdbCleanupPolicy.YDB_CLEANUP_POLICY_DISABLED; + case 2: + case "YDB_CLEANUP_POLICY_DROP": + return YdbCleanupPolicy.YDB_CLEANUP_POLICY_DROP; + case -1: + case "UNRECOGNIZED": + default: + return YdbCleanupPolicy.UNRECOGNIZED; + } +} + +export function ydbCleanupPolicyToJSON(object: YdbCleanupPolicy): string { + switch (object) { + case YdbCleanupPolicy.YDB_CLEANUP_POLICY_UNSPECIFIED: + return "YDB_CLEANUP_POLICY_UNSPECIFIED"; + case YdbCleanupPolicy.YDB_CLEANUP_POLICY_DISABLED: + return "YDB_CLEANUP_POLICY_DISABLED"; + case YdbCleanupPolicy.YDB_CLEANUP_POLICY_DROP: + return "YDB_CLEANUP_POLICY_DROP"; + default: + return "UNKNOWN"; + } +} + +export interface YdbSource { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbSource"; + /** Path in YDB where to store tables */ + database: string; + /** Instance of YDB. 
example: ydb-ru-prestable.yandex.net:2135 */ + instance: string; + serviceAccountId: string; + paths: string[]; + /** Network interface for endpoint. If none will assume public ipv4 */ + subnetId: string; + /** Security groups */ + securityGroups: string[]; + /** Authorization Key */ + saKeyContent: string; +} + +export interface YdbTarget { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbTarget"; + /** Path in YDB where to store tables */ + database: string; + /** Instance of YDB. example: ydb-ru-prestable.yandex.net:2135 */ + instance: string; + serviceAccountId: string; + /** Path extension for database, each table will be layouted into this path */ + path: string; + /** Network interface for endpoint. If none will assume public ipv4 */ + subnetId: string; + /** Security groups */ + securityGroups: string[]; + /** SA content */ + saKeyContent: string; + /** Cleanup policy */ + cleanupPolicy: YdbCleanupPolicy; +} + +const baseYdbSource: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbSource", + database: "", + instance: "", + serviceAccountId: "", + paths: "", + subnetId: "", + securityGroups: "", + saKeyContent: "", +}; + +export const YdbSource = { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbSource" as const, + + encode( + message: YdbSource, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.database !== "") { + writer.uint32(10).string(message.database); + } + if (message.instance !== "") { + writer.uint32(18).string(message.instance); + } + if (message.serviceAccountId !== "") { + writer.uint32(50).string(message.serviceAccountId); + } + for (const v of message.paths) { + writer.uint32(42).string(v!); + } + if (message.subnetId !== "") { + writer.uint32(242).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(274).string(v!); + } + if (message.saKeyContent !== "") { + writer.uint32(266).string(message.saKeyContent); + } + return writer; + }, + + decode(input: _m0.Reader | 
Uint8Array, length?: number): YdbSource { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseYdbSource } as YdbSource; + message.paths = []; + message.securityGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.database = reader.string(); + break; + case 2: + message.instance = reader.string(); + break; + case 6: + message.serviceAccountId = reader.string(); + break; + case 5: + message.paths.push(reader.string()); + break; + case 30: + message.subnetId = reader.string(); + break; + case 34: + message.securityGroups.push(reader.string()); + break; + case 33: + message.saKeyContent = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): YdbSource { + const message = { ...baseYdbSource } as YdbSource; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.instance = + object.instance !== undefined && object.instance !== null + ? String(object.instance) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.paths = (object.paths ?? []).map((e: any) => String(e)); + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.saKeyContent = + object.saKeyContent !== undefined && object.saKeyContent !== null + ? 
String(object.saKeyContent) + : ""; + return message; + }, + + toJSON(message: YdbSource): unknown { + const obj: any = {}; + message.database !== undefined && (obj.database = message.database); + message.instance !== undefined && (obj.instance = message.instance); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + if (message.paths) { + obj.paths = message.paths.map((e) => e); + } else { + obj.paths = []; + } + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.saKeyContent !== undefined && + (obj.saKeyContent = message.saKeyContent); + return obj; + }, + + fromPartial, I>>( + object: I + ): YdbSource { + const message = { ...baseYdbSource } as YdbSource; + message.database = object.database ?? ""; + message.instance = object.instance ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.paths = object.paths?.map((e) => e) || []; + message.subnetId = object.subnetId ?? ""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.saKeyContent = object.saKeyContent ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(YdbSource.$type, YdbSource); + +const baseYdbTarget: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbTarget", + database: "", + instance: "", + serviceAccountId: "", + path: "", + subnetId: "", + securityGroups: "", + saKeyContent: "", + cleanupPolicy: 0, +}; + +export const YdbTarget = { + $type: "yandex.cloud.datatransfer.v1.endpoint.YdbTarget" as const, + + encode( + message: YdbTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.database !== "") { + writer.uint32(10).string(message.database); + } + if (message.instance !== "") { + writer.uint32(18).string(message.instance); + } + if (message.serviceAccountId !== "") { + writer.uint32(90).string(message.serviceAccountId); + } + if (message.path !== "") { + writer.uint32(82).string(message.path); + } + if (message.subnetId !== "") { + writer.uint32(242).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(266).string(v!); + } + if (message.saKeyContent !== "") { + writer.uint32(258).string(message.saKeyContent); + } + if (message.cleanupPolicy !== 0) { + writer.uint32(168).int32(message.cleanupPolicy); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): YdbTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseYdbTarget } as YdbTarget; + message.securityGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.database = reader.string(); + break; + case 2: + message.instance = reader.string(); + break; + case 11: + message.serviceAccountId = reader.string(); + break; + case 10: + message.path = reader.string(); + break; + case 30: + message.subnetId = reader.string(); + break; + case 33: + message.securityGroups.push(reader.string()); + break; + case 32: + message.saKeyContent = reader.string(); + break; + case 21: + message.cleanupPolicy = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): YdbTarget { + const message = { ...baseYdbTarget } as YdbTarget; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.instance = + object.instance !== undefined && object.instance !== null + ? String(object.instance) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.path = + object.path !== undefined && object.path !== null + ? String(object.path) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.saKeyContent = + object.saKeyContent !== undefined && object.saKeyContent !== null + ? String(object.saKeyContent) + : ""; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? 
ydbCleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + return message; + }, + + toJSON(message: YdbTarget): unknown { + const obj: any = {}; + message.database !== undefined && (obj.database = message.database); + message.instance !== undefined && (obj.instance = message.instance); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.path !== undefined && (obj.path = message.path); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.saKeyContent !== undefined && + (obj.saKeyContent = message.saKeyContent); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = ydbCleanupPolicyToJSON(message.cleanupPolicy)); + return obj; + }, + + fromPartial, I>>( + object: I + ): YdbTarget { + const message = { ...baseYdbTarget } as YdbTarget; + message.database = object.database ?? ""; + message.instance = object.instance ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.path = object.path ?? ""; + message.subnetId = object.subnetId ?? ""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.saKeyContent = object.saKeyContent ?? ""; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(YdbTarget.$type, YdbTarget); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts index d358258b..6ca4623e 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts @@ -34,16 +34,19 @@ export interface ListEndpointsRequest { folderId: string; /** * The maximum number of endpoints to be sent in the response message. If the - * folder contains more endpoints than page_size, next_page_token will be included - * in the response message. Include it into the subsequent ListEndpointRequest to - * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 500. + * folder contains more endpoints than `page_size`, `next_page_token` will be + * included + * in the response message. Include it into the subsequent `ListEndpointRequest` to + * fetch the next page. Defaults to `100` if not specified. The maximum allowed + * value + * for this field is `500`. */ pageSize: number; /** * Opaque value identifying the endpoints page to be fetched. Should be empty in - * the first ListEndpointsRequest. Subsequent request should have this field filled - * with the next_page_token from the previous ListEndpointsResponse. + * the first `ListEndpointsRequest`. Subsequent requests should have this field + * filled + * with the `next_page_token` from the previous `ListEndpointsResponse`. */ pageToken: string; } @@ -52,14 +55,14 @@ export interface ListEndpointsResponse { $type: "yandex.cloud.datatransfer.v1.ListEndpointsResponse"; /** * The list of endpoints. If there are more endpoints in the folder, then - * next_page_token is a non-empty string to be included into the subsequent - * ListEndpointsRequest to fetch the next endpoints page. 
+ * `next_page_token` is a non-empty string to be included into the subsequent + * `ListEndpointsRequest` to fetch the next endpoints page. */ endpoints: Endpoint[]; /** * Opaque value identifying the next endpoints page. This field is empty if there - * are no more endpoints in the folder. Otherwise it is non-empty and should be - * included in the subsequent ListEndpointsRequest to fetch the next endpoints + * are no more endpoints in the folder. Otherwise, it is non-empty and should be + * included in the subsequent `ListEndpointsRequest` to fetch the next endpoints * page. */ nextPageToken: string; @@ -99,8 +102,8 @@ export interface UpdateEndpointRequest { /** * Field mask specifying endpoint fields to be updated. Semantics for this field is * described here: - * https://pkg.go.dev/google.golang.org/protobuf/types/known/fieldmaskpb#FieldMask - * The only exception is that if the repeated field is specified in the mask, then + * + * The only exception: if the repeated field is specified in the mask, then * the new value replaces the old one instead of being appended to the old one. 
*/ updateMask?: FieldMask; diff --git a/src/generated/yandex/cloud/datatransfer/v1/transfer.ts b/src/generated/yandex/cloud/datatransfer/v1/transfer.ts index 95d6c067..8df44cb8 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/transfer.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/transfer.ts @@ -55,13 +55,21 @@ export function transferTypeToJSON(object: TransferType): string { export enum TransferStatus { TRANSFER_STATUS_UNSPECIFIED = 0, + /** CREATING - Transfer does some work before running */ CREATING = 1, + /** CREATED - Transfer created but not started by user */ CREATED = 2, + /** RUNNING - Transfer currently doing replication work */ RUNNING = 3, + /** STOPPING - Transfer shutdown */ STOPPING = 4, + /** STOPPED - Transfer stopped by user */ STOPPED = 5, + /** ERROR - Transfer stopped by system */ ERROR = 6, + /** SNAPSHOTTING - Transfer copy snapshot */ SNAPSHOTTING = 7, + /** DONE - Transfer reach terminal phase */ DONE = 8, UNRECOGNIZED = -1, } @@ -127,6 +135,7 @@ export function transferStatusToJSON(object: TransferStatus): string { } } +/** Transfer core entity */ export interface Transfer { $type: "yandex.cloud.datatransfer.v1.Transfer"; id: string; diff --git a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts index 902b07ea..645886db 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts @@ -59,8 +59,8 @@ export interface UpdateTransferRequest { /** * Field mask specifying transfer fields to be updated. Semantics for this field is * described here: - * https://pkg.go.dev/google.golang.org/protobuf/types/known/fieldmaskpb#FieldMask - * The only exception is that if the repeated field is specified in the mask, then + * + * The only exception: if the repeated field is specified in the mask, then * the new value replaces the old one instead of being appended to the old one. 
*/ updateMask?: FieldMask; @@ -93,16 +93,20 @@ export interface ListTransfersRequest { folderId: string; /** * The maximum number of transfers to be sent in the response message. If the - * folder contains more transfers than page_size, next_page_token will be included - * in the response message. Include it into the subsequent ListTransfersRequest to - * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 500. + * folder contains more transfers than `page_size`, `next_page_token` will be + * included + * in the response message. Include it into the subsequent `ListTransfersRequest` + * to + * fetch the next page. Defaults to `100` if not specified. The maximum allowed + * value + * for this field is `500`. */ pageSize: number; /** * Opaque value identifying the transfers page to be fetched. Should be empty in - * the first ListTransfersRequest. Subsequent request should have this field filled - * with the next_page_token from the previous ListTransfersResponse. + * the first `ListTransfersRequest`. Subsequent requests should have this field + * filled + * with the `next_page_token` from the previous `ListTransfersResponse`. */ pageToken: string; } @@ -111,14 +115,14 @@ export interface ListTransfersResponse { $type: "yandex.cloud.datatransfer.v1.ListTransfersResponse"; /** * The list of transfers. If there are more transfers in the folder, then - * next_page_token is a non-empty string to be included into the subsequent - * ListTransfersRequest to fetch the next transfers page. + * `next_page_token` is a non-empty string to be included into the subsequent + * `ListTransfersRequest` to fetch the next transfers page. */ transfers: Transfer[]; /** * Opaque value identifying the next transfers page. This field is empty if there * are no more transfers in the folder. 
Otherwise it is non-empty and should be - * included in the subsequent ListTransfersRequest to fetch the next transfers + * included in the subsequent `ListTransfersRequest` to fetch the next transfers * page. */ nextPageToken: string; diff --git a/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts b/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts index 73c8f110..33bf2619 100644 --- a/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts +++ b/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts @@ -31,6 +31,22 @@ import { export const protobufPackage = "yandex.cloud.dns.v1"; +export interface UpdateDnsZonePrivateNetworksRequest { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksRequest"; + /** ID of the DNS zone which private networks will be updated */ + dnsZoneId: string; + /** Network IDs to remove */ + privateNetworkIdAdditions: string[]; + /** Network IDs to add */ + privateNetworkIdDeletions: string[]; +} + +export interface UpdateDnsZonePrivateNetworksMetadata { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksMetadata"; + /** ID of the DNS zone which private networks was updated */ + dnsZoneId: string; +} + export interface GetDnsZoneRequest { $type: "yandex.cloud.dns.v1.GetDnsZoneRequest"; /** @@ -343,6 +359,194 @@ export interface ListDnsZoneOperationsResponse { nextPageToken: string; } +const baseUpdateDnsZonePrivateNetworksRequest: object = { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksRequest", + dnsZoneId: "", + privateNetworkIdAdditions: "", + privateNetworkIdDeletions: "", +}; + +export const UpdateDnsZonePrivateNetworksRequest = { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksRequest" as const, + + encode( + message: UpdateDnsZonePrivateNetworksRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dnsZoneId !== "") { + writer.uint32(10).string(message.dnsZoneId); + } + for (const v of message.privateNetworkIdAdditions) { + writer.uint32(18).string(v!); + } + for 
(const v of message.privateNetworkIdDeletions) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDnsZonePrivateNetworksRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateDnsZonePrivateNetworksRequest, + } as UpdateDnsZonePrivateNetworksRequest; + message.privateNetworkIdAdditions = []; + message.privateNetworkIdDeletions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dnsZoneId = reader.string(); + break; + case 2: + message.privateNetworkIdAdditions.push(reader.string()); + break; + case 3: + message.privateNetworkIdDeletions.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDnsZonePrivateNetworksRequest { + const message = { + ...baseUpdateDnsZonePrivateNetworksRequest, + } as UpdateDnsZonePrivateNetworksRequest; + message.dnsZoneId = + object.dnsZoneId !== undefined && object.dnsZoneId !== null + ? String(object.dnsZoneId) + : ""; + message.privateNetworkIdAdditions = ( + object.privateNetworkIdAdditions ?? [] + ).map((e: any) => String(e)); + message.privateNetworkIdDeletions = ( + object.privateNetworkIdDeletions ?? 
[] + ).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateDnsZonePrivateNetworksRequest): unknown { + const obj: any = {}; + message.dnsZoneId !== undefined && (obj.dnsZoneId = message.dnsZoneId); + if (message.privateNetworkIdAdditions) { + obj.privateNetworkIdAdditions = message.privateNetworkIdAdditions.map( + (e) => e + ); + } else { + obj.privateNetworkIdAdditions = []; + } + if (message.privateNetworkIdDeletions) { + obj.privateNetworkIdDeletions = message.privateNetworkIdDeletions.map( + (e) => e + ); + } else { + obj.privateNetworkIdDeletions = []; + } + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateDnsZonePrivateNetworksRequest { + const message = { + ...baseUpdateDnsZonePrivateNetworksRequest, + } as UpdateDnsZonePrivateNetworksRequest; + message.dnsZoneId = object.dnsZoneId ?? ""; + message.privateNetworkIdAdditions = + object.privateNetworkIdAdditions?.map((e) => e) || []; + message.privateNetworkIdDeletions = + object.privateNetworkIdDeletions?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateDnsZonePrivateNetworksRequest.$type, + UpdateDnsZonePrivateNetworksRequest +); + +const baseUpdateDnsZonePrivateNetworksMetadata: object = { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksMetadata", + dnsZoneId: "", +}; + +export const UpdateDnsZonePrivateNetworksMetadata = { + $type: "yandex.cloud.dns.v1.UpdateDnsZonePrivateNetworksMetadata" as const, + + encode( + message: UpdateDnsZonePrivateNetworksMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dnsZoneId !== "") { + writer.uint32(10).string(message.dnsZoneId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDnsZonePrivateNetworksMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateDnsZonePrivateNetworksMetadata, + } as UpdateDnsZonePrivateNetworksMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dnsZoneId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDnsZonePrivateNetworksMetadata { + const message = { + ...baseUpdateDnsZonePrivateNetworksMetadata, + } as UpdateDnsZonePrivateNetworksMetadata; + message.dnsZoneId = + object.dnsZoneId !== undefined && object.dnsZoneId !== null + ? String(object.dnsZoneId) + : ""; + return message; + }, + + toJSON(message: UpdateDnsZonePrivateNetworksMetadata): unknown { + const obj: any = {}; + message.dnsZoneId !== undefined && (obj.dnsZoneId = message.dnsZoneId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateDnsZonePrivateNetworksMetadata { + const message = { + ...baseUpdateDnsZonePrivateNetworksMetadata, + } as UpdateDnsZonePrivateNetworksMetadata; + message.dnsZoneId = object.dnsZoneId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateDnsZonePrivateNetworksMetadata.$type, + UpdateDnsZonePrivateNetworksMetadata +); + const baseGetDnsZoneRequest: object = { $type: "yandex.cloud.dns.v1.GetDnsZoneRequest", dnsZoneId: "", @@ -2561,6 +2765,19 @@ export const DnsZoneServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Atomically updates zone private networks */ + updatePrivateNetworks: { + path: "/yandex.cloud.dns.v1.DnsZoneService/UpdatePrivateNetworks", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateDnsZonePrivateNetworksRequest) => + Buffer.from(UpdateDnsZonePrivateNetworksRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateDnsZonePrivateNetworksRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface DnsZoneServiceServer extends UntypedServiceImplementation { @@ -2613,6 +2830,11 @@ export interface DnsZoneServiceServer extends UntypedServiceImplementation { setAccessBindings: handleUnaryCall; /** Updates access bindings for the specified DNS zone. 
*/ updateAccessBindings: handleUnaryCall; + /** Atomically updates zone private networks */ + updatePrivateNetworks: handleUnaryCall< + UpdateDnsZonePrivateNetworksRequest, + Operation + >; } export interface DnsZoneServiceClient extends Client { @@ -2874,6 +3096,22 @@ export interface DnsZoneServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Atomically updates zone private networks */ + updatePrivateNetworks( + request: UpdateDnsZonePrivateNetworksRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updatePrivateNetworks( + request: UpdateDnsZonePrivateNetworksRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updatePrivateNetworks( + request: UpdateDnsZonePrivateNetworksRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const DnsZoneServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/iam/v1/user_account.ts b/src/generated/yandex/cloud/iam/v1/user_account.ts index 812d1d41..677e58eb 100644 --- a/src/generated/yandex/cloud/iam/v1/user_account.ts +++ b/src/generated/yandex/cloud/iam/v1/user_account.ts @@ -30,7 +30,7 @@ export interface YandexPassportUserAccount { /** * A SAML federated user. - * For more information, see [federations](/docs/iam/concepts/users/saml-federations). + * For more information, see [federations](/docs/iam/concepts/federations). 
*/ export interface SamlUserAccount { $type: "yandex.cloud.iam.v1.SamlUserAccount"; diff --git a/src/generated/yandex/cloud/index.ts b/src/generated/yandex/cloud/index.ts index c4005edb..62af102f 100644 --- a/src/generated/yandex/cloud/index.ts +++ b/src/generated/yandex/cloud/index.ts @@ -2,6 +2,7 @@ export * as access from './access/' export * as ai from './ai/' export * as api from './api/' export * as apploadbalancer from './apploadbalancer/' +export * as backup from './backup/' export * as billing from './billing/' export * as cdn from './cdn/' export * as certificatemanager from './certificatemanager/' @@ -17,6 +18,7 @@ export * as iot from './iot/' export * as k8s from './k8s/' export * as kms from './kms/' export * as loadbalancer from './loadbalancer/' +export * as loadtesting from './loadtesting/' export * as lockbox from './lockbox/' export * as logging from './logging/' export * as marketplace from './marketplace/' diff --git a/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts b/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts index 4854b4e6..33194542 100644 --- a/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts +++ b/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts @@ -197,7 +197,7 @@ messageTypeRegistry.set( /** A set of methods to work with IoT Core messages on behalf of broker */ export const BrokerDataServiceService = { - /** Publishes message on behalf of specified registry */ + /** Publishes message on behalf of specified broker */ publish: { path: "/yandex.cloud.iot.broker.v1.BrokerDataService/Publish", requestStream: false, @@ -214,12 +214,12 @@ export const BrokerDataServiceService = { } as const; export interface BrokerDataServiceServer extends UntypedServiceImplementation { - /** Publishes message on behalf of specified registry */ + /** Publishes message on behalf of specified broker */ publish: handleUnaryCall; } export interface BrokerDataServiceClient extends Client { - /** 
Publishes message on behalf of specified registry */ + /** Publishes message on behalf of specified broker */ publish( request: PublishBrokerDataRequest, callback: ( diff --git a/src/generated/yandex/cloud/iot/devices/v1/registry.ts b/src/generated/yandex/cloud/iot/devices/v1/registry.ts index f00aca23..9a5d781a 100644 --- a/src/generated/yandex/cloud/iot/devices/v1/registry.ts +++ b/src/generated/yandex/cloud/iot/devices/v1/registry.ts @@ -119,6 +119,27 @@ export interface RegistryPassword { createdAt?: Date; } +/** A Yandex Data Streams export. */ +export interface DataStreamExport { + $type: "yandex.cloud.iot.devices.v1.DataStreamExport"; + /** ID of the YDS export. */ + id: string; + /** Name of the YDS export. */ + name: string; + /** ID of the registry that the YDS export belongs to. */ + registryId: string; + /** MQTT topic whose messages export to YDS. */ + mqttTopicFilter: string; + /** YDS database. */ + database: string; + /** YDS stream name. */ + stream: string; + /** ID of the service account which has permission to write to data stream. */ + serviceAccountId: string; + /** Creation timestamp. 
*/ + createdAt?: Date; +} + const baseRegistry: object = { $type: "yandex.cloud.iot.devices.v1.Registry", id: "", @@ -663,6 +684,164 @@ export const RegistryPassword = { messageTypeRegistry.set(RegistryPassword.$type, RegistryPassword); +const baseDataStreamExport: object = { + $type: "yandex.cloud.iot.devices.v1.DataStreamExport", + id: "", + name: "", + registryId: "", + mqttTopicFilter: "", + database: "", + stream: "", + serviceAccountId: "", +}; + +export const DataStreamExport = { + $type: "yandex.cloud.iot.devices.v1.DataStreamExport" as const, + + encode( + message: DataStreamExport, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.registryId !== "") { + writer.uint32(26).string(message.registryId); + } + if (message.mqttTopicFilter !== "") { + writer.uint32(34).string(message.mqttTopicFilter); + } + if (message.database !== "") { + writer.uint32(42).string(message.database); + } + if (message.stream !== "") { + writer.uint32(50).string(message.stream); + } + if (message.serviceAccountId !== "") { + writer.uint32(58).string(message.serviceAccountId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(66).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DataStreamExport { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDataStreamExport } as DataStreamExport; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.registryId = reader.string(); + break; + case 4: + message.mqttTopicFilter = reader.string(); + break; + case 5: + message.database = reader.string(); + break; + case 6: + message.stream = reader.string(); + break; + case 7: + message.serviceAccountId = reader.string(); + break; + case 8: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DataStreamExport { + const message = { ...baseDataStreamExport } as DataStreamExport; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.mqttTopicFilter = + object.mqttTopicFilter !== undefined && object.mqttTopicFilter !== null + ? String(object.mqttTopicFilter) + : ""; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.stream = + object.stream !== undefined && object.stream !== null + ? String(object.stream) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
fromJsonTimestamp(object.createdAt) + : undefined; + return message; + }, + + toJSON(message: DataStreamExport): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.name !== undefined && (obj.name = message.name); + message.registryId !== undefined && (obj.registryId = message.registryId); + message.mqttTopicFilter !== undefined && + (obj.mqttTopicFilter = message.mqttTopicFilter); + message.database !== undefined && (obj.database = message.database); + message.stream !== undefined && (obj.stream = message.stream); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): DataStreamExport { + const message = { ...baseDataStreamExport } as DataStreamExport; + message.id = object.id ?? ""; + message.name = object.name ?? ""; + message.registryId = object.registryId ?? ""; + message.mqttTopicFilter = object.mqttTopicFilter ?? ""; + message.database = object.database ?? ""; + message.stream = object.stream ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.createdAt = object.createdAt ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(DataStreamExport.$type, DataStreamExport); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/iot/devices/v1/registry_service.ts b/src/generated/yandex/cloud/iot/devices/v1/registry_service.ts index c1576217..1022db04 100644 --- a/src/generated/yandex/cloud/iot/devices/v1/registry_service.ts +++ b/src/generated/yandex/cloud/iot/devices/v1/registry_service.ts @@ -20,6 +20,7 @@ import { RegistryCertificate, RegistryPassword, DeviceAlias, + DataStreamExport, } from "../../../../../yandex/cloud/iot/devices/v1/registry"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -373,6 +374,66 @@ export interface ListRegistryOperationsResponse { nextPageToken: string; } +export interface AddDataStreamExportRequest { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportRequest"; + /** Name of the YDS export. The name must be unique within the folder. */ + name: string; + /** + * ID of the registry to add a YDS export for. + * + * To get a registry ID make a [RegistryService.List] request. + */ + registryId: string; + /** MQTT topic whose messages export to YDS. */ + mqttTopicFilter: string; + /** YDS database. */ + database: string; + /** YDS stream name. */ + stream: string; + /** ID of the service account which has permission to write to data stream. */ + serviceAccountId: string; +} + +export interface AddDataStreamExportMetadata { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportMetadata"; + /** ID of the registry for which the YDS export was added. */ + registryId: string; + /** ID of the added YDS export. */ + dataStreamExportId: string; +} + +export interface DeleteDataStreamExportRequest { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportRequest"; + /** ID of a registry for which the YDS export is being deleted. */ + registryId: string; + /** ID of the YDS export to delete. 
*/ + dataStreamExportId: string; +} + +export interface DeleteDataStreamExportMetadata { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportMetadata"; + /** ID of a registry for which the YDS export was deleted. */ + registryId: string; + /** ID of the deleted YDS export. */ + dataStreamExportId: string; +} + +export interface ListDataStreamExportsRequest { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsRequest"; + /** + * ID of the registry to list YDS exports in. + * + * To get a registry ID make a [RegistryService.List] request. + */ + registryId: string; +} + +export interface ListDataStreamExportsResponse { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsResponse"; + /** List of YDS exports for the specified registry. */ + dataStreamExports: DataStreamExport[]; +} + const baseGetRegistryRequest: object = { $type: "yandex.cloud.iot.devices.v1.GetRegistryRequest", registryId: "", @@ -2949,6 +3010,571 @@ messageTypeRegistry.set( ListRegistryOperationsResponse ); +const baseAddDataStreamExportRequest: object = { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportRequest", + name: "", + registryId: "", + mqttTopicFilter: "", + database: "", + stream: "", + serviceAccountId: "", +}; + +export const AddDataStreamExportRequest = { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportRequest" as const, + + encode( + message: AddDataStreamExportRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.registryId !== "") { + writer.uint32(18).string(message.registryId); + } + if (message.mqttTopicFilter !== "") { + writer.uint32(34).string(message.mqttTopicFilter); + } + if (message.database !== "") { + writer.uint32(42).string(message.database); + } + if (message.stream !== "") { + writer.uint32(50).string(message.stream); + } + if (message.serviceAccountId !== "") { + writer.uint32(58).string(message.serviceAccountId); + } + 
return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddDataStreamExportRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddDataStreamExportRequest, + } as AddDataStreamExportRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.registryId = reader.string(); + break; + case 4: + message.mqttTopicFilter = reader.string(); + break; + case 5: + message.database = reader.string(); + break; + case 6: + message.stream = reader.string(); + break; + case 7: + message.serviceAccountId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddDataStreamExportRequest { + const message = { + ...baseAddDataStreamExportRequest, + } as AddDataStreamExportRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.mqttTopicFilter = + object.mqttTopicFilter !== undefined && object.mqttTopicFilter !== null + ? String(object.mqttTopicFilter) + : ""; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.stream = + object.stream !== undefined && object.stream !== null + ? String(object.stream) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? 
String(object.serviceAccountId) + : ""; + return message; + }, + + toJSON(message: AddDataStreamExportRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.registryId !== undefined && (obj.registryId = message.registryId); + message.mqttTopicFilter !== undefined && + (obj.mqttTopicFilter = message.mqttTopicFilter); + message.database !== undefined && (obj.database = message.database); + message.stream !== undefined && (obj.stream = message.stream); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddDataStreamExportRequest { + const message = { + ...baseAddDataStreamExportRequest, + } as AddDataStreamExportRequest; + message.name = object.name ?? ""; + message.registryId = object.registryId ?? ""; + message.mqttTopicFilter = object.mqttTopicFilter ?? ""; + message.database = object.database ?? ""; + message.stream = object.stream ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AddDataStreamExportRequest.$type, + AddDataStreamExportRequest +); + +const baseAddDataStreamExportMetadata: object = { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportMetadata", + registryId: "", + dataStreamExportId: "", +}; + +export const AddDataStreamExportMetadata = { + $type: "yandex.cloud.iot.devices.v1.AddDataStreamExportMetadata" as const, + + encode( + message: AddDataStreamExportMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + if (message.dataStreamExportId !== "") { + writer.uint32(18).string(message.dataStreamExportId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddDataStreamExportMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddDataStreamExportMetadata, + } as AddDataStreamExportMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + case 2: + message.dataStreamExportId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddDataStreamExportMetadata { + const message = { + ...baseAddDataStreamExportMetadata, + } as AddDataStreamExportMetadata; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.dataStreamExportId = + object.dataStreamExportId !== undefined && + object.dataStreamExportId !== null + ? String(object.dataStreamExportId) + : ""; + return message; + }, + + toJSON(message: AddDataStreamExportMetadata): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + message.dataStreamExportId !== undefined && + (obj.dataStreamExportId = message.dataStreamExportId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddDataStreamExportMetadata { + const message = { + ...baseAddDataStreamExportMetadata, + } as AddDataStreamExportMetadata; + message.registryId = object.registryId ?? ""; + message.dataStreamExportId = object.dataStreamExportId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + AddDataStreamExportMetadata.$type, + AddDataStreamExportMetadata +); + +const baseDeleteDataStreamExportRequest: object = { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportRequest", + registryId: "", + dataStreamExportId: "", +}; + +export const DeleteDataStreamExportRequest = { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportRequest" as const, + + encode( + message: DeleteDataStreamExportRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + if (message.dataStreamExportId !== "") { + writer.uint32(18).string(message.dataStreamExportId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteDataStreamExportRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteDataStreamExportRequest, + } as DeleteDataStreamExportRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + case 2: + message.dataStreamExportId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteDataStreamExportRequest { + const message = { + ...baseDeleteDataStreamExportRequest, + } as DeleteDataStreamExportRequest; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.dataStreamExportId = + object.dataStreamExportId !== undefined && + object.dataStreamExportId !== null + ? 
String(object.dataStreamExportId) + : ""; + return message; + }, + + toJSON(message: DeleteDataStreamExportRequest): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + message.dataStreamExportId !== undefined && + (obj.dataStreamExportId = message.dataStreamExportId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteDataStreamExportRequest { + const message = { + ...baseDeleteDataStreamExportRequest, + } as DeleteDataStreamExportRequest; + message.registryId = object.registryId ?? ""; + message.dataStreamExportId = object.dataStreamExportId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteDataStreamExportRequest.$type, + DeleteDataStreamExportRequest +); + +const baseDeleteDataStreamExportMetadata: object = { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportMetadata", + registryId: "", + dataStreamExportId: "", +}; + +export const DeleteDataStreamExportMetadata = { + $type: "yandex.cloud.iot.devices.v1.DeleteDataStreamExportMetadata" as const, + + encode( + message: DeleteDataStreamExportMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + if (message.dataStreamExportId !== "") { + writer.uint32(18).string(message.dataStreamExportId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteDataStreamExportMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteDataStreamExportMetadata, + } as DeleteDataStreamExportMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + case 2: + message.dataStreamExportId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteDataStreamExportMetadata { + const message = { + ...baseDeleteDataStreamExportMetadata, + } as DeleteDataStreamExportMetadata; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + message.dataStreamExportId = + object.dataStreamExportId !== undefined && + object.dataStreamExportId !== null + ? String(object.dataStreamExportId) + : ""; + return message; + }, + + toJSON(message: DeleteDataStreamExportMetadata): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + message.dataStreamExportId !== undefined && + (obj.dataStreamExportId = message.dataStreamExportId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteDataStreamExportMetadata { + const message = { + ...baseDeleteDataStreamExportMetadata, + } as DeleteDataStreamExportMetadata; + message.registryId = object.registryId ?? ""; + message.dataStreamExportId = object.dataStreamExportId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteDataStreamExportMetadata.$type, + DeleteDataStreamExportMetadata +); + +const baseListDataStreamExportsRequest: object = { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsRequest", + registryId: "", +}; + +export const ListDataStreamExportsRequest = { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsRequest" as const, + + encode( + message: ListDataStreamExportsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.registryId !== "") { + writer.uint32(10).string(message.registryId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDataStreamExportsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDataStreamExportsRequest, + } as ListDataStreamExportsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.registryId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDataStreamExportsRequest { + const message = { + ...baseListDataStreamExportsRequest, + } as ListDataStreamExportsRequest; + message.registryId = + object.registryId !== undefined && object.registryId !== null + ? String(object.registryId) + : ""; + return message; + }, + + toJSON(message: ListDataStreamExportsRequest): unknown { + const obj: any = {}; + message.registryId !== undefined && (obj.registryId = message.registryId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDataStreamExportsRequest { + const message = { + ...baseListDataStreamExportsRequest, + } as ListDataStreamExportsRequest; + message.registryId = object.registryId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListDataStreamExportsRequest.$type, + ListDataStreamExportsRequest +); + +const baseListDataStreamExportsResponse: object = { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsResponse", +}; + +export const ListDataStreamExportsResponse = { + $type: "yandex.cloud.iot.devices.v1.ListDataStreamExportsResponse" as const, + + encode( + message: ListDataStreamExportsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.dataStreamExports) { + DataStreamExport.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDataStreamExportsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDataStreamExportsResponse, + } as ListDataStreamExportsResponse; + message.dataStreamExports = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataStreamExports.push( + DataStreamExport.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDataStreamExportsResponse { + const message = { + ...baseListDataStreamExportsResponse, + } as ListDataStreamExportsResponse; + message.dataStreamExports = (object.dataStreamExports ?? []).map((e: any) => + DataStreamExport.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListDataStreamExportsResponse): unknown { + const obj: any = {}; + if (message.dataStreamExports) { + obj.dataStreamExports = message.dataStreamExports.map((e) => + e ? 
DataStreamExport.toJSON(e) : undefined + ); + } else { + obj.dataStreamExports = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDataStreamExportsResponse { + const message = { + ...baseListDataStreamExportsResponse, + } as ListDataStreamExportsResponse; + message.dataStreamExports = + object.dataStreamExports?.map((e) => DataStreamExport.fromPartial(e)) || + []; + return message; + }, +}; + +messageTypeRegistry.set( + ListDataStreamExportsResponse.$type, + ListDataStreamExportsResponse +); + /** A set of methods for managing registry. */ export const RegistryServiceService = { /** @@ -3122,6 +3748,46 @@ export const RegistryServiceService = { responseDeserialize: (value: Buffer) => ListDeviceTopicAliasesResponse.decode(value), }, + /** Retrieves the list of YDS exports for the specified registry. */ + listDataStreamExports: { + path: "/yandex.cloud.iot.devices.v1.RegistryService/ListDataStreamExports", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListDataStreamExportsRequest) => + Buffer.from(ListDataStreamExportsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListDataStreamExportsRequest.decode(value), + responseSerialize: (value: ListDataStreamExportsResponse) => + Buffer.from(ListDataStreamExportsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListDataStreamExportsResponse.decode(value), + }, + /** Adds YDS export for the specified registry. 
*/ + addDataStreamExport: { + path: "/yandex.cloud.iot.devices.v1.RegistryService/AddDataStreamExport", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddDataStreamExportRequest) => + Buffer.from(AddDataStreamExportRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AddDataStreamExportRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified YDS export. */ + deleteDataStreamExport: { + path: "/yandex.cloud.iot.devices.v1.RegistryService/DeleteDataStreamExport", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteDataStreamExportRequest) => + Buffer.from(DeleteDataStreamExportRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteDataStreamExportRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Lists operations for the specified registry. */ listOperations: { path: "/yandex.cloud.iot.devices.v1.RegistryService/ListOperations", @@ -3180,6 +3846,18 @@ export interface RegistryServiceServer extends UntypedServiceImplementation { ListDeviceTopicAliasesRequest, ListDeviceTopicAliasesResponse >; + /** Retrieves the list of YDS exports for the specified registry. */ + listDataStreamExports: handleUnaryCall< + ListDataStreamExportsRequest, + ListDataStreamExportsResponse + >; + /** Adds YDS export for the specified registry. */ + addDataStreamExport: handleUnaryCall; + /** Deletes the specified YDS export. */ + deleteDataStreamExport: handleUnaryCall< + DeleteDataStreamExportRequest, + Operation + >; /** Lists operations for the specified registry. 
*/ listOperations: handleUnaryCall< ListRegistryOperationsRequest, @@ -3435,6 +4113,63 @@ export interface RegistryServiceClient extends Client { response: ListDeviceTopicAliasesResponse ) => void ): ClientUnaryCall; + /** Retrieves the list of YDS exports for the specified registry. */ + listDataStreamExports( + request: ListDataStreamExportsRequest, + callback: ( + error: ServiceError | null, + response: ListDataStreamExportsResponse + ) => void + ): ClientUnaryCall; + listDataStreamExports( + request: ListDataStreamExportsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListDataStreamExportsResponse + ) => void + ): ClientUnaryCall; + listDataStreamExports( + request: ListDataStreamExportsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListDataStreamExportsResponse + ) => void + ): ClientUnaryCall; + /** Adds YDS export for the specified registry. */ + addDataStreamExport( + request: AddDataStreamExportRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addDataStreamExport( + request: AddDataStreamExportRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addDataStreamExport( + request: AddDataStreamExportRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified YDS export. 
*/ + deleteDataStreamExport( + request: DeleteDataStreamExportRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteDataStreamExport( + request: DeleteDataStreamExportRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteDataStreamExport( + request: DeleteDataStreamExportRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Lists operations for the specified registry. */ listOperations( request: ListRegistryOperationsRequest, diff --git a/src/generated/yandex/cloud/k8s/v1/cluster.ts b/src/generated/yandex/cloud/k8s/v1/cluster.ts index 50584794..d90e67cc 100644 --- a/src/generated/yandex/cloud/k8s/v1/cluster.ts +++ b/src/generated/yandex/cloud/k8s/v1/cluster.ts @@ -231,6 +231,10 @@ export interface Master { zonalMaster?: ZonalMaster | undefined; /** Parameters of the region for the master. */ regionalMaster?: RegionalMaster | undefined; + /** Locations specification for Kubernetes control-plane (master) instances. */ + locations: Location[]; + /** Number of etcd nodes in cluster. */ + etcdClusterSize: number; /** Version of Kubernetes components that runs on the master. */ version: string; /** @@ -246,6 +250,8 @@ export interface Master { maintenancePolicy?: MasterMaintenancePolicy; /** Master security groups. */ securityGroupIds: string[]; + /** Cloud Logging for master components. */ + masterLogging?: MasterLogging; } export interface MasterAuth { @@ -276,6 +282,14 @@ export interface RegionalMaster { externalV6Address: string; } +export interface Location { + $type: "yandex.cloud.k8s.v1.Location"; + /** ID of the availability zone where the master resides. */ + zoneId: string; + /** ID of the VPC network's subnet where the master resides. 
*/ + subnetId: string; +} + export interface MasterEndpoints { $type: "yandex.cloud.k8s.v1.MasterEndpoints"; /** Internal endpoint that can be used to connect to the master from cloud networks. */ @@ -327,6 +341,24 @@ export interface MasterMaintenancePolicy { maintenanceWindow?: MaintenanceWindow; } +export interface MasterLogging { + $type: "yandex.cloud.k8s.v1.MasterLogging"; + /** Identifies whether Cloud Logging is enabled for master components. */ + enabled: boolean; + /** ID of the log group where logs of master components should be stored. */ + logGroupId: string | undefined; + /** ID of the folder where logs should be stored (in default group). */ + folderId: string | undefined; + /** Identifies whether Cloud Logging is enabled for audit logs. */ + auditEnabled: boolean; + /** Identifies whether Cloud Logging is enabled for cluster-autoscaler. */ + clusterAutoscalerEnabled: boolean; + /** Identifies whether Cloud Logging is enabled for kube-apiserver. */ + kubeApiserverEnabled: boolean; + /** Identifies whether Cloud Logging is enabled for events. 
*/ + eventsEnabled: boolean; +} + export interface NetworkPolicy { $type: "yandex.cloud.k8s.v1.NetworkPolicy"; provider: NetworkPolicy_Provider; @@ -853,6 +885,7 @@ messageTypeRegistry.set(Cluster_LabelsEntry.$type, Cluster_LabelsEntry); const baseMaster: object = { $type: "yandex.cloud.k8s.v1.Master", + etcdClusterSize: 0, version: "", securityGroupIds: "", }; @@ -876,6 +909,12 @@ export const Master = { writer.uint32(58).fork() ).ldelim(); } + for (const v of message.locations) { + Location.encode(v!, writer.uint32(82).fork()).ldelim(); + } + if (message.etcdClusterSize !== 0) { + writer.uint32(88).int64(message.etcdClusterSize); + } if (message.version !== "") { writer.uint32(18).string(message.version); } @@ -903,6 +942,12 @@ export const Master = { for (const v of message.securityGroupIds) { writer.uint32(66).string(v!); } + if (message.masterLogging !== undefined) { + MasterLogging.encode( + message.masterLogging, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -910,6 +955,7 @@ export const Master = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseMaster } as Master; + message.locations = []; message.securityGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); @@ -923,6 +969,12 @@ export const Master = { reader.uint32() ); break; + case 10: + message.locations.push(Location.decode(reader, reader.uint32())); + break; + case 11: + message.etcdClusterSize = longToNumber(reader.int64() as Long); + break; case 2: message.version = reader.string(); break; @@ -944,6 +996,9 @@ export const Master = { case 8: message.securityGroupIds.push(reader.string()); break; + case 9: + message.masterLogging = MasterLogging.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -962,6 +1017,13 @@ export const Master = { object.regionalMaster !== undefined && object.regionalMaster !== null ? 
RegionalMaster.fromJSON(object.regionalMaster) : undefined; + message.locations = (object.locations ?? []).map((e: any) => + Location.fromJSON(e) + ); + message.etcdClusterSize = + object.etcdClusterSize !== undefined && object.etcdClusterSize !== null + ? Number(object.etcdClusterSize) + : 0; message.version = object.version !== undefined && object.version !== null ? String(object.version) @@ -986,6 +1048,10 @@ export const Master = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? MasterLogging.fromJSON(object.masterLogging) + : undefined; return message; }, @@ -999,6 +1065,15 @@ export const Master = { (obj.regionalMaster = message.regionalMaster ? RegionalMaster.toJSON(message.regionalMaster) : undefined); + if (message.locations) { + obj.locations = message.locations.map((e) => + e ? Location.toJSON(e) : undefined + ); + } else { + obj.locations = []; + } + message.etcdClusterSize !== undefined && + (obj.etcdClusterSize = Math.round(message.etcdClusterSize)); message.version !== undefined && (obj.version = message.version); message.endpoints !== undefined && (obj.endpoints = message.endpoints @@ -1021,6 +1096,10 @@ export const Master = { } else { obj.securityGroupIds = []; } + message.masterLogging !== undefined && + (obj.masterLogging = message.masterLogging + ? MasterLogging.toJSON(message.masterLogging) + : undefined); return obj; }, @@ -1034,6 +1113,9 @@ export const Master = { object.regionalMaster !== undefined && object.regionalMaster !== null ? RegionalMaster.fromPartial(object.regionalMaster) : undefined; + message.locations = + object.locations?.map((e) => Location.fromPartial(e)) || []; + message.etcdClusterSize = object.etcdClusterSize ?? 0; message.version = object.version ?? ""; message.endpoints = object.endpoints !== undefined && object.endpoints !== null @@ -1053,6 +1135,10 @@ export const Master = { ? 
MasterMaintenancePolicy.fromPartial(object.maintenancePolicy) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? MasterLogging.fromPartial(object.masterLogging) + : undefined; return message; }, }; @@ -1322,6 +1408,79 @@ export const RegionalMaster = { messageTypeRegistry.set(RegionalMaster.$type, RegionalMaster); +const baseLocation: object = { + $type: "yandex.cloud.k8s.v1.Location", + zoneId: "", + subnetId: "", +}; + +export const Location = { + $type: "yandex.cloud.k8s.v1.Location" as const, + + encode( + message: Location, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.zoneId !== "") { + writer.uint32(10).string(message.zoneId); + } + if (message.subnetId !== "") { + writer.uint32(18).string(message.subnetId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Location { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLocation } as Location; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.zoneId = reader.string(); + break; + case 2: + message.subnetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Location { + const message = { ...baseLocation } as Location; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? 
String(object.subnetId) + : ""; + return message; + }, + + toJSON(message: Location): unknown { + const obj: any = {}; + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + return obj; + }, + + fromPartial, I>>(object: I): Location { + const message = { ...baseLocation } as Location; + message.zoneId = object.zoneId ?? ""; + message.subnetId = object.subnetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Location.$type, Location); + const baseMasterEndpoints: object = { $type: "yandex.cloud.k8s.v1.MasterEndpoints", internalV4Endpoint: "", @@ -1638,6 +1797,150 @@ export const MasterMaintenancePolicy = { messageTypeRegistry.set(MasterMaintenancePolicy.$type, MasterMaintenancePolicy); +const baseMasterLogging: object = { + $type: "yandex.cloud.k8s.v1.MasterLogging", + enabled: false, + auditEnabled: false, + clusterAutoscalerEnabled: false, + kubeApiserverEnabled: false, + eventsEnabled: false, +}; + +export const MasterLogging = { + $type: "yandex.cloud.k8s.v1.MasterLogging" as const, + + encode( + message: MasterLogging, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + if (message.logGroupId !== undefined) { + writer.uint32(18).string(message.logGroupId); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.auditEnabled === true) { + writer.uint32(32).bool(message.auditEnabled); + } + if (message.clusterAutoscalerEnabled === true) { + writer.uint32(40).bool(message.clusterAutoscalerEnabled); + } + if (message.kubeApiserverEnabled === true) { + writer.uint32(48).bool(message.kubeApiserverEnabled); + } + if (message.eventsEnabled === true) { + writer.uint32(56).bool(message.eventsEnabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MasterLogging { + const reader = input instanceof 
_m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMasterLogging } as MasterLogging; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + case 2: + message.logGroupId = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.auditEnabled = reader.bool(); + break; + case 5: + message.clusterAutoscalerEnabled = reader.bool(); + break; + case 6: + message.kubeApiserverEnabled = reader.bool(); + break; + case 7: + message.eventsEnabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MasterLogging { + const message = { ...baseMasterLogging } as MasterLogging; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.logGroupId = + object.logGroupId !== undefined && object.logGroupId !== null + ? String(object.logGroupId) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.auditEnabled = + object.auditEnabled !== undefined && object.auditEnabled !== null + ? Boolean(object.auditEnabled) + : false; + message.clusterAutoscalerEnabled = + object.clusterAutoscalerEnabled !== undefined && + object.clusterAutoscalerEnabled !== null + ? Boolean(object.clusterAutoscalerEnabled) + : false; + message.kubeApiserverEnabled = + object.kubeApiserverEnabled !== undefined && + object.kubeApiserverEnabled !== null + ? Boolean(object.kubeApiserverEnabled) + : false; + message.eventsEnabled = + object.eventsEnabled !== undefined && object.eventsEnabled !== null + ? 
Boolean(object.eventsEnabled) + : false; + return message; + }, + + toJSON(message: MasterLogging): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + message.logGroupId !== undefined && (obj.logGroupId = message.logGroupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.auditEnabled !== undefined && + (obj.auditEnabled = message.auditEnabled); + message.clusterAutoscalerEnabled !== undefined && + (obj.clusterAutoscalerEnabled = message.clusterAutoscalerEnabled); + message.kubeApiserverEnabled !== undefined && + (obj.kubeApiserverEnabled = message.kubeApiserverEnabled); + message.eventsEnabled !== undefined && + (obj.eventsEnabled = message.eventsEnabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): MasterLogging { + const message = { ...baseMasterLogging } as MasterLogging; + message.enabled = object.enabled ?? false; + message.logGroupId = object.logGroupId ?? undefined; + message.folderId = object.folderId ?? undefined; + message.auditEnabled = object.auditEnabled ?? false; + message.clusterAutoscalerEnabled = object.clusterAutoscalerEnabled ?? false; + message.kubeApiserverEnabled = object.kubeApiserverEnabled ?? false; + message.eventsEnabled = object.eventsEnabled ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(MasterLogging.$type, MasterLogging); + const baseNetworkPolicy: object = { $type: "yandex.cloud.k8s.v1.NetworkPolicy", provider: 0, diff --git a/src/generated/yandex/cloud/k8s/v1/cluster_service.ts b/src/generated/yandex/cloud/k8s/v1/cluster_service.ts index 1b7239f2..5d24ffdb 100644 --- a/src/generated/yandex/cloud/k8s/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/k8s/v1/cluster_service.ts @@ -19,6 +19,7 @@ import { NetworkPolicy, IPAllocationPolicy, MasterMaintenancePolicy, + MasterLogging, ReleaseChannel, KMSProvider, Cluster, @@ -183,6 +184,10 @@ export interface MasterUpdateSpec { maintenancePolicy?: MasterMaintenancePolicy; /** Master security groups. */ securityGroupIds: string[]; + /** Cloud Logging for master components. */ + masterLogging?: MasterLogging; + /** Update master instance locations. */ + locations: LocationSpec[]; } export interface UpdateClusterMetadata { @@ -370,12 +375,33 @@ export interface MasterSpec { zonalMasterSpec?: ZonalMasterSpec | undefined; /** Specification of the regional master. */ regionalMasterSpec?: RegionalMasterSpec | undefined; + /** + * Locations specification for Kubernetes control-plane (master) instances. + * Works in conjunction with [etcd_cluster_size]. See it's documentation for details. + * Possible combinations: + * - 1 location and etcd_cluster_size = 1 - a single node cluster whose availability is limited by the availability of a single Compute Instance; downtime is expected during cluster updates. + * - 1 location and etcd_cluster_size = 3 - a highly available cluster within a single availability zone; can survive the failure of a Compute Instance, a server, or an individual server rack. + * - 3 location and etcd_cluster_size = 3 - a highly available cluster with each etcd instance located within separate availability zone; can survive the failure of a single availability zone. 
+ */ + locations: LocationSpec[]; + /** + * Number of etcd nodes in cluster. + * Works in conjunction with [locations]. See it's documentation for details. + * Optional. If not set, will be assumed equal to the number of locations. + */ + etcdClusterSize: number; + /** Specification of parameters for external IPv4 networking. */ + externalV4AddressSpec?: ExternalAddressSpec; + /** Specification of parameters for external IPv6 networking. */ + externalV6AddressSpec?: ExternalAddressSpec; /** Version of Kubernetes components that runs on the master. */ version: string; /** Maintenance policy of the master. */ maintenancePolicy?: MasterMaintenancePolicy; /** Master security groups. */ securityGroupIds: string[]; + /** Cloud Logging for master components. */ + masterLogging?: MasterLogging; } export interface ZonalMasterSpec { @@ -423,6 +449,17 @@ export interface MasterLocation { internalV4AddressSpec?: InternalAddressSpec; } +export interface LocationSpec { + $type: "yandex.cloud.k8s.v1.LocationSpec"; + /** ID of the availability zone where the master resides. */ + zoneId: string; + /** + * ID of the VPC network's subnet where the master resides. + * If not specified and there is a single subnet in specified zone, address in this subnet will be allocated. + */ + subnetId: string; +} + const baseGetClusterRequest: object = { $type: "yandex.cloud.k8s.v1.GetClusterRequest", clusterId: "", @@ -1426,6 +1463,15 @@ export const MasterUpdateSpec = { for (const v of message.securityGroupIds) { writer.uint32(26).string(v!); } + if (message.masterLogging !== undefined) { + MasterLogging.encode( + message.masterLogging, + writer.uint32(34).fork() + ).ldelim(); + } + for (const v of message.locations) { + LocationSpec.encode(v!, writer.uint32(42).fork()).ldelim(); + } return writer; }, @@ -1434,6 +1480,7 @@ export const MasterUpdateSpec = { let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseMasterUpdateSpec } as MasterUpdateSpec; message.securityGroupIds = []; + message.locations = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1449,6 +1496,12 @@ export const MasterUpdateSpec = { case 3: message.securityGroupIds.push(reader.string()); break; + case 4: + message.masterLogging = MasterLogging.decode(reader, reader.uint32()); + break; + case 5: + message.locations.push(LocationSpec.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -1471,6 +1524,13 @@ export const MasterUpdateSpec = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? MasterLogging.fromJSON(object.masterLogging) + : undefined; + message.locations = (object.locations ?? []).map((e: any) => + LocationSpec.fromJSON(e) + ); return message; }, @@ -1489,6 +1549,17 @@ export const MasterUpdateSpec = { } else { obj.securityGroupIds = []; } + message.masterLogging !== undefined && + (obj.masterLogging = message.masterLogging + ? MasterLogging.toJSON(message.masterLogging) + : undefined); + if (message.locations) { + obj.locations = message.locations.map((e) => + e ? LocationSpec.toJSON(e) : undefined + ); + } else { + obj.locations = []; + } return obj; }, @@ -1506,6 +1577,12 @@ export const MasterUpdateSpec = { ? MasterMaintenancePolicy.fromPartial(object.maintenancePolicy) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? 
MasterLogging.fromPartial(object.masterLogging) + : undefined; + message.locations = + object.locations?.map((e) => LocationSpec.fromPartial(e)) || []; return message; }, }; @@ -2710,6 +2787,7 @@ messageTypeRegistry.set( const baseMasterSpec: object = { $type: "yandex.cloud.k8s.v1.MasterSpec", + etcdClusterSize: 0, version: "", securityGroupIds: "", }; @@ -2733,6 +2811,24 @@ export const MasterSpec = { writer.uint32(18).fork() ).ldelim(); } + for (const v of message.locations) { + LocationSpec.encode(v!, writer.uint32(66).fork()).ldelim(); + } + if (message.etcdClusterSize !== 0) { + writer.uint32(72).int64(message.etcdClusterSize); + } + if (message.externalV4AddressSpec !== undefined) { + ExternalAddressSpec.encode( + message.externalV4AddressSpec, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.externalV6AddressSpec !== undefined) { + ExternalAddressSpec.encode( + message.externalV6AddressSpec, + writer.uint32(90).fork() + ).ldelim(); + } if (message.version !== "") { writer.uint32(26).string(message.version); } @@ -2745,6 +2841,12 @@ export const MasterSpec = { for (const v of message.securityGroupIds) { writer.uint32(50).string(v!); } + if (message.masterLogging !== undefined) { + MasterLogging.encode( + message.masterLogging, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -2752,6 +2854,7 @@ export const MasterSpec = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseMasterSpec } as MasterSpec; + message.locations = []; message.securityGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); @@ -2768,6 +2871,24 @@ export const MasterSpec = { reader.uint32() ); break; + case 8: + message.locations.push(LocationSpec.decode(reader, reader.uint32())); + break; + case 9: + message.etcdClusterSize = longToNumber(reader.int64() as Long); + break; + case 10: + message.externalV4AddressSpec = ExternalAddressSpec.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.externalV6AddressSpec = ExternalAddressSpec.decode( + reader, + reader.uint32() + ); + break; case 3: message.version = reader.string(); break; @@ -2780,6 +2901,9 @@ export const MasterSpec = { case 6: message.securityGroupIds.push(reader.string()); break; + case 7: + message.masterLogging = MasterLogging.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -2799,6 +2923,23 @@ export const MasterSpec = { object.regionalMasterSpec !== null ? RegionalMasterSpec.fromJSON(object.regionalMasterSpec) : undefined; + message.locations = (object.locations ?? []).map((e: any) => + LocationSpec.fromJSON(e) + ); + message.etcdClusterSize = + object.etcdClusterSize !== undefined && object.etcdClusterSize !== null + ? Number(object.etcdClusterSize) + : 0; + message.externalV4AddressSpec = + object.externalV4AddressSpec !== undefined && + object.externalV4AddressSpec !== null + ? ExternalAddressSpec.fromJSON(object.externalV4AddressSpec) + : undefined; + message.externalV6AddressSpec = + object.externalV6AddressSpec !== undefined && + object.externalV6AddressSpec !== null + ? ExternalAddressSpec.fromJSON(object.externalV6AddressSpec) + : undefined; message.version = object.version !== undefined && object.version !== null ? String(object.version) @@ -2811,6 +2952,10 @@ export const MasterSpec = { message.securityGroupIds = (object.securityGroupIds ?? 
[]).map((e: any) => String(e) ); + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? MasterLogging.fromJSON(object.masterLogging) + : undefined; return message; }, @@ -2824,6 +2969,23 @@ export const MasterSpec = { (obj.regionalMasterSpec = message.regionalMasterSpec ? RegionalMasterSpec.toJSON(message.regionalMasterSpec) : undefined); + if (message.locations) { + obj.locations = message.locations.map((e) => + e ? LocationSpec.toJSON(e) : undefined + ); + } else { + obj.locations = []; + } + message.etcdClusterSize !== undefined && + (obj.etcdClusterSize = Math.round(message.etcdClusterSize)); + message.externalV4AddressSpec !== undefined && + (obj.externalV4AddressSpec = message.externalV4AddressSpec + ? ExternalAddressSpec.toJSON(message.externalV4AddressSpec) + : undefined); + message.externalV6AddressSpec !== undefined && + (obj.externalV6AddressSpec = message.externalV6AddressSpec + ? ExternalAddressSpec.toJSON(message.externalV6AddressSpec) + : undefined); message.version !== undefined && (obj.version = message.version); message.maintenancePolicy !== undefined && (obj.maintenancePolicy = message.maintenancePolicy @@ -2834,6 +2996,10 @@ export const MasterSpec = { } else { obj.securityGroupIds = []; } + message.masterLogging !== undefined && + (obj.masterLogging = message.masterLogging + ? MasterLogging.toJSON(message.masterLogging) + : undefined); return obj; }, @@ -2850,6 +3016,19 @@ export const MasterSpec = { object.regionalMasterSpec !== null ? RegionalMasterSpec.fromPartial(object.regionalMasterSpec) : undefined; + message.locations = + object.locations?.map((e) => LocationSpec.fromPartial(e)) || []; + message.etcdClusterSize = object.etcdClusterSize ?? 0; + message.externalV4AddressSpec = + object.externalV4AddressSpec !== undefined && + object.externalV4AddressSpec !== null + ? 
ExternalAddressSpec.fromPartial(object.externalV4AddressSpec) + : undefined; + message.externalV6AddressSpec = + object.externalV6AddressSpec !== undefined && + object.externalV6AddressSpec !== null + ? ExternalAddressSpec.fromPartial(object.externalV6AddressSpec) + : undefined; message.version = object.version ?? ""; message.maintenancePolicy = object.maintenancePolicy !== undefined && @@ -2857,6 +3036,10 @@ export const MasterSpec = { ? MasterMaintenancePolicy.fromPartial(object.maintenancePolicy) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.masterLogging = + object.masterLogging !== undefined && object.masterLogging !== null + ? MasterLogging.fromPartial(object.masterLogging) + : undefined; return message; }, }; @@ -3324,6 +3507,81 @@ export const MasterLocation = { messageTypeRegistry.set(MasterLocation.$type, MasterLocation); +const baseLocationSpec: object = { + $type: "yandex.cloud.k8s.v1.LocationSpec", + zoneId: "", + subnetId: "", +}; + +export const LocationSpec = { + $type: "yandex.cloud.k8s.v1.LocationSpec" as const, + + encode( + message: LocationSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.zoneId !== "") { + writer.uint32(10).string(message.zoneId); + } + if (message.subnetId !== "") { + writer.uint32(18).string(message.subnetId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LocationSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLocationSpec } as LocationSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.zoneId = reader.string(); + break; + case 2: + message.subnetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LocationSpec { + const message = { ...baseLocationSpec } as LocationSpec; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + return message; + }, + + toJSON(message: LocationSpec): unknown { + const obj: any = {}; + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + return obj; + }, + + fromPartial, I>>( + object: I + ): LocationSpec { + const message = { ...baseLocationSpec } as LocationSpec; + message.zoneId = object.zoneId ?? ""; + message.subnetId = object.subnetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LocationSpec.$type, LocationSpec); + /** A set of methods for managing Kubernetes cluster. */ export const ClusterServiceService = { /** diff --git a/src/generated/yandex/cloud/k8s/v1/node.ts b/src/generated/yandex/cloud/k8s/v1/node.ts index 02a3220d..67b4321a 100644 --- a/src/generated/yandex/cloud/k8s/v1/node.ts +++ b/src/generated/yandex/cloud/k8s/v1/node.ts @@ -86,6 +86,10 @@ export enum Node_Status { * is deleted (this is our bug). */ MISSING = 5, + /** STOPPED - Node is stopped */ + STOPPED = 6, + /** UNKNOWN - Backend request to kubernetes api was unsuccessful. 
*/ + UNKNOWN = 7, UNRECOGNIZED = -1, } @@ -109,6 +113,12 @@ export function node_StatusFromJSON(object: any): Node_Status { case 5: case "MISSING": return Node_Status.MISSING; + case 6: + case "STOPPED": + return Node_Status.STOPPED; + case 7: + case "UNKNOWN": + return Node_Status.UNKNOWN; case -1: case "UNRECOGNIZED": default: @@ -130,6 +140,10 @@ export function node_StatusToJSON(object: Node_Status): string { return "READY"; case Node_Status.MISSING: return "MISSING"; + case Node_Status.STOPPED: + return "STOPPED"; + case Node_Status.UNKNOWN: + return "UNKNOWN"; default: return "UNKNOWN"; } @@ -281,10 +295,9 @@ export interface NodeTemplate { /** Specification for the boot disk that will be attached to the node. */ bootDiskSpec?: DiskSpec; /** - * The metadata as `key:value` pairs assigned to this instance template. This includes custom metadata and predefined keys. + * The metadata as `key:value` pairs assigned to this instance template. Only SSH keys are supported as metadata. * - * For example, you may use the metadata in order to provide your public SSH key to the node. - * For more information, see [Metadata](/docs/compute/concepts/vm-metadata). + * For more information, see [Connecting to a node over SSH](/docs/managed-kubernetes/operations/node-connect-ssh). 
*/ metadata: { [key: string]: string }; /** @@ -305,6 +318,9 @@ export interface NodeTemplate { /** this parameter allows to specify type of network acceleration used on nodes (instances) */ networkSettings?: NodeTemplate_NetworkSettings; containerRuntimeSettings?: NodeTemplate_ContainerRuntimeSettings; + containerNetworkSettings?: NodeTemplate_ContainerNetworkSettings; + /** GPU settings */ + gpuSettings?: GpuSettings; } export interface NodeTemplate_LabelsEntry { @@ -417,6 +433,68 @@ export function nodeTemplate_ContainerRuntimeSettings_TypeToJSON( } } +export interface NodeTemplate_ContainerNetworkSettings { + $type: "yandex.cloud.k8s.v1.NodeTemplate.ContainerNetworkSettings"; + podMtu: number; +} + +export interface GpuSettings { + $type: "yandex.cloud.k8s.v1.GpuSettings"; + /** GPU cluster id, that mk8s node will join. */ + gpuClusterId: string; + /** GPU environment configured on node. */ + gpuEnvironment: GpuSettings_GpuEnvironment; +} + +export enum GpuSettings_GpuEnvironment { + /** GPU_ENVIRONMENT_UNSPECIFIED - Use one of the values below, depending on the default for the specific Cloud installation. */ + GPU_ENVIRONMENT_UNSPECIFIED = 0, + /** RUNC_DRIVERS_CUDA - Use a node image with the pre-installed GPU toolkit, drivers and CUDA. */ + RUNC_DRIVERS_CUDA = 1, + /** + * RUNC - Use a node image with the pre-installed GPU toolkit but without drivers. + * You should install drivers on a node yourself in that case. + * There are tools to help you to do that, for example gpu-operator. 
+ */ + RUNC = 2, + UNRECOGNIZED = -1, +} + +export function gpuSettings_GpuEnvironmentFromJSON( + object: any +): GpuSettings_GpuEnvironment { + switch (object) { + case 0: + case "GPU_ENVIRONMENT_UNSPECIFIED": + return GpuSettings_GpuEnvironment.GPU_ENVIRONMENT_UNSPECIFIED; + case 1: + case "RUNC_DRIVERS_CUDA": + return GpuSettings_GpuEnvironment.RUNC_DRIVERS_CUDA; + case 2: + case "RUNC": + return GpuSettings_GpuEnvironment.RUNC; + case -1: + case "UNRECOGNIZED": + default: + return GpuSettings_GpuEnvironment.UNRECOGNIZED; + } +} + +export function gpuSettings_GpuEnvironmentToJSON( + object: GpuSettings_GpuEnvironment +): string { + switch (object) { + case GpuSettings_GpuEnvironment.GPU_ENVIRONMENT_UNSPECIFIED: + return "GPU_ENVIRONMENT_UNSPECIFIED"; + case GpuSettings_GpuEnvironment.RUNC_DRIVERS_CUDA: + return "RUNC_DRIVERS_CUDA"; + case GpuSettings_GpuEnvironment.RUNC: + return "RUNC"; + default: + return "UNKNOWN"; + } +} + export interface NetworkInterfaceSpec { $type: "yandex.cloud.k8s.v1.NetworkInterfaceSpec"; /** IDs of the subnets. 
*/ @@ -1266,6 +1344,18 @@ export const NodeTemplate = { writer.uint32(98).fork() ).ldelim(); } + if (message.containerNetworkSettings !== undefined) { + NodeTemplate_ContainerNetworkSettings.encode( + message.containerNetworkSettings, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.gpuSettings !== undefined) { + GpuSettings.encode( + message.gpuSettings, + writer.uint32(146).fork() + ).ldelim(); + } return writer; }, @@ -1345,6 +1435,16 @@ export const NodeTemplate = { reader.uint32() ); break; + case 16: + message.containerNetworkSettings = + NodeTemplate_ContainerNetworkSettings.decode( + reader, + reader.uint32() + ); + break; + case 18: + message.gpuSettings = GpuSettings.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1409,6 +1509,17 @@ export const NodeTemplate = { object.containerRuntimeSettings ) : undefined; + message.containerNetworkSettings = + object.containerNetworkSettings !== undefined && + object.containerNetworkSettings !== null + ? NodeTemplate_ContainerNetworkSettings.fromJSON( + object.containerNetworkSettings + ) + : undefined; + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? GpuSettings.fromJSON(object.gpuSettings) + : undefined; return message; }, @@ -1465,6 +1576,16 @@ export const NodeTemplate = { message.containerRuntimeSettings ) : undefined); + message.containerNetworkSettings !== undefined && + (obj.containerNetworkSettings = message.containerNetworkSettings + ? NodeTemplate_ContainerNetworkSettings.toJSON( + message.containerNetworkSettings + ) + : undefined); + message.gpuSettings !== undefined && + (obj.gpuSettings = message.gpuSettings + ? 
GpuSettings.toJSON(message.gpuSettings) + : undefined); return obj; }, @@ -1525,6 +1646,17 @@ export const NodeTemplate = { object.containerRuntimeSettings ) : undefined; + message.containerNetworkSettings = + object.containerNetworkSettings !== undefined && + object.containerNetworkSettings !== null + ? NodeTemplate_ContainerNetworkSettings.fromPartial( + object.containerNetworkSettings + ) + : undefined; + message.gpuSettings = + object.gpuSettings !== undefined && object.gpuSettings !== null + ? GpuSettings.fromPartial(object.gpuSettings) + : undefined; return message; }, }; @@ -1853,6 +1985,159 @@ messageTypeRegistry.set( NodeTemplate_ContainerRuntimeSettings ); +const baseNodeTemplate_ContainerNetworkSettings: object = { + $type: "yandex.cloud.k8s.v1.NodeTemplate.ContainerNetworkSettings", + podMtu: 0, +}; + +export const NodeTemplate_ContainerNetworkSettings = { + $type: "yandex.cloud.k8s.v1.NodeTemplate.ContainerNetworkSettings" as const, + + encode( + message: NodeTemplate_ContainerNetworkSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.podMtu !== 0) { + writer.uint32(8).int64(message.podMtu); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): NodeTemplate_ContainerNetworkSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseNodeTemplate_ContainerNetworkSettings, + } as NodeTemplate_ContainerNetworkSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.podMtu = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): NodeTemplate_ContainerNetworkSettings { + const message = { + ...baseNodeTemplate_ContainerNetworkSettings, + } as NodeTemplate_ContainerNetworkSettings; + message.podMtu = + object.podMtu !== undefined && object.podMtu !== null + ? Number(object.podMtu) + : 0; + return message; + }, + + toJSON(message: NodeTemplate_ContainerNetworkSettings): unknown { + const obj: any = {}; + message.podMtu !== undefined && (obj.podMtu = Math.round(message.podMtu)); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): NodeTemplate_ContainerNetworkSettings { + const message = { + ...baseNodeTemplate_ContainerNetworkSettings, + } as NodeTemplate_ContainerNetworkSettings; + message.podMtu = object.podMtu ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + NodeTemplate_ContainerNetworkSettings.$type, + NodeTemplate_ContainerNetworkSettings +); + +const baseGpuSettings: object = { + $type: "yandex.cloud.k8s.v1.GpuSettings", + gpuClusterId: "", + gpuEnvironment: 0, +}; + +export const GpuSettings = { + $type: "yandex.cloud.k8s.v1.GpuSettings" as const, + + encode( + message: GpuSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gpuClusterId !== "") { + writer.uint32(10).string(message.gpuClusterId); + } + if (message.gpuEnvironment !== 0) { + writer.uint32(16).int32(message.gpuEnvironment); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GpuSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGpuSettings } as GpuSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gpuClusterId = reader.string(); + break; + case 2: + message.gpuEnvironment = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GpuSettings { + const message = { ...baseGpuSettings } as GpuSettings; + message.gpuClusterId = + object.gpuClusterId !== undefined && object.gpuClusterId !== null + ? String(object.gpuClusterId) + : ""; + message.gpuEnvironment = + object.gpuEnvironment !== undefined && object.gpuEnvironment !== null + ? gpuSettings_GpuEnvironmentFromJSON(object.gpuEnvironment) + : 0; + return message; + }, + + toJSON(message: GpuSettings): unknown { + const obj: any = {}; + message.gpuClusterId !== undefined && + (obj.gpuClusterId = message.gpuClusterId); + message.gpuEnvironment !== undefined && + (obj.gpuEnvironment = gpuSettings_GpuEnvironmentToJSON( + message.gpuEnvironment + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): GpuSettings { + const message = { ...baseGpuSettings } as GpuSettings; + message.gpuClusterId = object.gpuClusterId ?? ""; + message.gpuEnvironment = object.gpuEnvironment ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(GpuSettings.$type, GpuSettings); + const baseNetworkInterfaceSpec: object = { $type: "yandex.cloud.k8s.v1.NetworkInterfaceSpec", subnetIds: "", diff --git a/src/generated/yandex/cloud/kms/index.ts b/src/generated/yandex/cloud/kms/index.ts index 6a9bb4d4..89e4a981 100644 --- a/src/generated/yandex/cloud/kms/index.ts +++ b/src/generated/yandex/cloud/kms/index.ts @@ -1,3 +1,9 @@ export * as symmetric_crypto_service from './v1/symmetric_crypto_service' export * as symmetric_key from './v1/symmetric_key' -export * as symmetric_key_service from './v1/symmetric_key_service' \ No newline at end of file +export * as symmetric_key_service from './v1/symmetric_key_service' +export * as asymmetric_encryption_crypto_service from './v1/asymmetricencryption/asymmetric_encryption_crypto_service' +export * as asymmetric_encryption_key from './v1/asymmetricencryption/asymmetric_encryption_key' +export * as asymmetric_encryption_key_service from './v1/asymmetricencryption/asymmetric_encryption_key_service' +export * as asymmetric_signature_crypto_service from './v1/asymmetricsignature/asymmetric_signature_crypto_service' +export * as asymmetric_signature_key from './v1/asymmetricsignature/asymmetric_signature_key' +export * as asymmetric_signature_key_service from './v1/asymmetricsignature/asymmetric_signature_key_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_crypto_service.ts b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_crypto_service.ts new file mode 100644 index 00000000..b3060918 --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_crypto_service.ts @@ -0,0 +1,577 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + 
UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricencryption"; + +export interface AsymmetricDecryptRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptRequest"; + /** ID of the asymmetric KMS key to use for decryption. */ + keyId: string; + /** + * Ciphertext to be decrypted. + * Should be encoded with base64. + */ + ciphertext: Buffer; +} + +export interface AsymmetricDecryptResponse { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptResponse"; + /** ID of the asymmetric KMS key that was used for decryption. */ + keyId: string; + /** Decrypted plaintext. */ + plaintext: Buffer; +} + +export interface AsymmetricGetPublicKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyRequest"; + /** ID of the asymmetric KMS key to be used for public key retrieval. */ + keyId: string; +} + +export interface AsymmetricGetPublicKeyResponse { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyResponse"; + /** ID of the asymmetric KMS key to get public key of. */ + keyId: string; + /** + * Public key value. + * The value is a PEM-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), + * as defined in RFC 5280. 
+ */ + publicKey: string; +} + +const baseAsymmetricDecryptRequest: object = { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptRequest", + keyId: "", +}; + +export const AsymmetricDecryptRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptRequest" as const, + + encode( + message: AsymmetricDecryptRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.ciphertext.length !== 0) { + writer.uint32(18).bytes(message.ciphertext); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricDecryptRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAsymmetricDecryptRequest, + } as AsymmetricDecryptRequest; + message.ciphertext = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.ciphertext = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricDecryptRequest { + const message = { + ...baseAsymmetricDecryptRequest, + } as AsymmetricDecryptRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.ciphertext = + object.ciphertext !== undefined && object.ciphertext !== null + ? Buffer.from(bytesFromBase64(object.ciphertext)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricDecryptRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.ciphertext !== undefined && + (obj.ciphertext = base64FromBytes( + message.ciphertext !== undefined ? 
message.ciphertext : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricDecryptRequest { + const message = { + ...baseAsymmetricDecryptRequest, + } as AsymmetricDecryptRequest; + message.keyId = object.keyId ?? ""; + message.ciphertext = object.ciphertext ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricDecryptRequest.$type, + AsymmetricDecryptRequest +); + +const baseAsymmetricDecryptResponse: object = { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptResponse", + keyId: "", +}; + +export const AsymmetricDecryptResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricDecryptResponse" as const, + + encode( + message: AsymmetricDecryptResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.plaintext.length !== 0) { + writer.uint32(18).bytes(message.plaintext); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricDecryptResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAsymmetricDecryptResponse, + } as AsymmetricDecryptResponse; + message.plaintext = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.plaintext = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricDecryptResponse { + const message = { + ...baseAsymmetricDecryptResponse, + } as AsymmetricDecryptResponse; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? 
String(object.keyId) + : ""; + message.plaintext = + object.plaintext !== undefined && object.plaintext !== null + ? Buffer.from(bytesFromBase64(object.plaintext)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricDecryptResponse): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.plaintext !== undefined && + (obj.plaintext = base64FromBytes( + message.plaintext !== undefined ? message.plaintext : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricDecryptResponse { + const message = { + ...baseAsymmetricDecryptResponse, + } as AsymmetricDecryptResponse; + message.keyId = object.keyId ?? ""; + message.plaintext = object.plaintext ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricDecryptResponse.$type, + AsymmetricDecryptResponse +); + +const baseAsymmetricGetPublicKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyRequest", + keyId: "", +}; + +export const AsymmetricGetPublicKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyRequest" as const, + + encode( + message: AsymmetricGetPublicKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricGetPublicKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricGetPublicKeyRequest { + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: AsymmetricGetPublicKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricGetPublicKeyRequest { + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricGetPublicKeyRequest.$type, + AsymmetricGetPublicKeyRequest +); + +const baseAsymmetricGetPublicKeyResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyResponse", + keyId: "", + publicKey: "", +}; + +export const AsymmetricGetPublicKeyResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricGetPublicKeyResponse" as const, + + encode( + message: AsymmetricGetPublicKeyResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.publicKey !== "") { + writer.uint32(18).string(message.publicKey); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricGetPublicKeyResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.publicKey = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricGetPublicKeyResponse { + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.publicKey = + object.publicKey !== undefined && object.publicKey !== null + ? String(object.publicKey) + : ""; + return message; + }, + + toJSON(message: AsymmetricGetPublicKeyResponse): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.publicKey !== undefined && (obj.publicKey = message.publicKey); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricGetPublicKeyResponse { + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + message.keyId = object.keyId ?? ""; + message.publicKey = object.publicKey ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricGetPublicKeyResponse.$type, + AsymmetricGetPublicKeyResponse +); + +/** Set of methods that perform asymmetric decryption. */ +export const AsymmetricEncryptionCryptoServiceService = { + /** Decrypts the given ciphertext with the specified key. 
*/ + decrypt: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionCryptoService/Decrypt", + requestStream: false, + responseStream: false, + requestSerialize: (value: AsymmetricDecryptRequest) => + Buffer.from(AsymmetricDecryptRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AsymmetricDecryptRequest.decode(value), + responseSerialize: (value: AsymmetricDecryptResponse) => + Buffer.from(AsymmetricDecryptResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricDecryptResponse.decode(value), + }, + /** Gets value of public key. */ + getPublicKey: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionCryptoService/GetPublicKey", + requestStream: false, + responseStream: false, + requestSerialize: (value: AsymmetricGetPublicKeyRequest) => + Buffer.from(AsymmetricGetPublicKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AsymmetricGetPublicKeyRequest.decode(value), + responseSerialize: (value: AsymmetricGetPublicKeyResponse) => + Buffer.from(AsymmetricGetPublicKeyResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricGetPublicKeyResponse.decode(value), + }, +} as const; + +export interface AsymmetricEncryptionCryptoServiceServer + extends UntypedServiceImplementation { + /** Decrypts the given ciphertext with the specified key. */ + decrypt: handleUnaryCall; + /** Gets value of public key. */ + getPublicKey: handleUnaryCall< + AsymmetricGetPublicKeyRequest, + AsymmetricGetPublicKeyResponse + >; +} + +export interface AsymmetricEncryptionCryptoServiceClient extends Client { + /** Decrypts the given ciphertext with the specified key. 
*/ + decrypt( + request: AsymmetricDecryptRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricDecryptResponse + ) => void + ): ClientUnaryCall; + decrypt( + request: AsymmetricDecryptRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricDecryptResponse + ) => void + ): ClientUnaryCall; + decrypt( + request: AsymmetricDecryptRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricDecryptResponse + ) => void + ): ClientUnaryCall; + /** Gets value of public key. */ + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; +} + +export const AsymmetricEncryptionCryptoServiceClient = + makeGenericClientConstructor( + AsymmetricEncryptionCryptoServiceService, + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionCryptoService" + ) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AsymmetricEncryptionCryptoServiceClient; + service: typeof AsymmetricEncryptionCryptoServiceService; + }; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable 
to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key.ts b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key.ts new file mode 100644 index 00000000..9765618a --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key.ts @@ -0,0 +1,501 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricencryption"; + +/** Supported asymmetric encryption algorithms. 
*/ +export enum AsymmetricEncryptionAlgorithm { + ASYMMETRIC_ENCRYPTION_ALGORITHM_UNSPECIFIED = 0, + /** RSA_2048_ENC_OAEP_SHA_256 - RSA-2048 encryption with OAEP padding and SHA-256 */ + RSA_2048_ENC_OAEP_SHA_256 = 1, + /** RSA_3072_ENC_OAEP_SHA_256 - RSA-3072 encryption with OAEP padding and SHA-256 */ + RSA_3072_ENC_OAEP_SHA_256 = 2, + /** RSA_4096_ENC_OAEP_SHA_256 - RSA-4096 encryption with OAEP padding and SHA-256 */ + RSA_4096_ENC_OAEP_SHA_256 = 3, + UNRECOGNIZED = -1, +} + +export function asymmetricEncryptionAlgorithmFromJSON( + object: any +): AsymmetricEncryptionAlgorithm { + switch (object) { + case 0: + case "ASYMMETRIC_ENCRYPTION_ALGORITHM_UNSPECIFIED": + return AsymmetricEncryptionAlgorithm.ASYMMETRIC_ENCRYPTION_ALGORITHM_UNSPECIFIED; + case 1: + case "RSA_2048_ENC_OAEP_SHA_256": + return AsymmetricEncryptionAlgorithm.RSA_2048_ENC_OAEP_SHA_256; + case 2: + case "RSA_3072_ENC_OAEP_SHA_256": + return AsymmetricEncryptionAlgorithm.RSA_3072_ENC_OAEP_SHA_256; + case 3: + case "RSA_4096_ENC_OAEP_SHA_256": + return AsymmetricEncryptionAlgorithm.RSA_4096_ENC_OAEP_SHA_256; + case -1: + case "UNRECOGNIZED": + default: + return AsymmetricEncryptionAlgorithm.UNRECOGNIZED; + } +} + +export function asymmetricEncryptionAlgorithmToJSON( + object: AsymmetricEncryptionAlgorithm +): string { + switch (object) { + case AsymmetricEncryptionAlgorithm.ASYMMETRIC_ENCRYPTION_ALGORITHM_UNSPECIFIED: + return "ASYMMETRIC_ENCRYPTION_ALGORITHM_UNSPECIFIED"; + case AsymmetricEncryptionAlgorithm.RSA_2048_ENC_OAEP_SHA_256: + return "RSA_2048_ENC_OAEP_SHA_256"; + case AsymmetricEncryptionAlgorithm.RSA_3072_ENC_OAEP_SHA_256: + return "RSA_3072_ENC_OAEP_SHA_256"; + case AsymmetricEncryptionAlgorithm.RSA_4096_ENC_OAEP_SHA_256: + return "RSA_4096_ENC_OAEP_SHA_256"; + default: + return "UNKNOWN"; + } +} + +/** An asymmetric KMS key that may contain several versions of the cryptographic material. 
*/ +export interface AsymmetricEncryptionKey { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey"; + /** ID of the key. */ + id: string; + /** ID of the folder that the key belongs to. */ + folderId: string; + /** Time when the key was created. */ + createdAt?: Date; + /** Name of the key. */ + name: string; + /** Description of the key. */ + description: string; + /** Custom labels for the key as `key:value` pairs. Maximum 64 per key. */ + labels: { [key: string]: string }; + /** Current status of the key. */ + status: AsymmetricEncryptionKey_Status; + /** Asymmetric Encryption Algorithm ID. */ + encryptionAlgorithm: AsymmetricEncryptionAlgorithm; + /** Flag that inhibits deletion of the key */ + deletionProtection: boolean; +} + +export enum AsymmetricEncryptionKey_Status { + STATUS_UNSPECIFIED = 0, + /** CREATING - The key is being created. */ + CREATING = 1, + /** + * ACTIVE - The key is active and can be used for encryption and decryption or signature and verification. + * Can be set to INACTIVE using the [AsymmetricKeyService.Update] method. + */ + ACTIVE = 2, + /** + * INACTIVE - The key is inactive and unusable. + * Can be set to ACTIVE using the [AsymmetricKeyService.Update] method. 
+ */ + INACTIVE = 3, + UNRECOGNIZED = -1, +} + +export function asymmetricEncryptionKey_StatusFromJSON( + object: any +): AsymmetricEncryptionKey_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return AsymmetricEncryptionKey_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATING": + return AsymmetricEncryptionKey_Status.CREATING; + case 2: + case "ACTIVE": + return AsymmetricEncryptionKey_Status.ACTIVE; + case 3: + case "INACTIVE": + return AsymmetricEncryptionKey_Status.INACTIVE; + case -1: + case "UNRECOGNIZED": + default: + return AsymmetricEncryptionKey_Status.UNRECOGNIZED; + } +} + +export function asymmetricEncryptionKey_StatusToJSON( + object: AsymmetricEncryptionKey_Status +): string { + switch (object) { + case AsymmetricEncryptionKey_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case AsymmetricEncryptionKey_Status.CREATING: + return "CREATING"; + case AsymmetricEncryptionKey_Status.ACTIVE: + return "ACTIVE"; + case AsymmetricEncryptionKey_Status.INACTIVE: + return "INACTIVE"; + default: + return "UNKNOWN"; + } +} + +export interface AsymmetricEncryptionKey_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey.LabelsEntry"; + key: string; + value: string; +} + +const baseAsymmetricEncryptionKey: object = { + $type: "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey", + id: "", + folderId: "", + name: "", + description: "", + status: 0, + encryptionAlgorithm: 0, + deletionProtection: false, +}; + +export const AsymmetricEncryptionKey = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey" as const, + + encode( + message: AsymmetricEncryptionKey, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + 
toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + AsymmetricEncryptionKey_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + if (message.encryptionAlgorithm !== 0) { + writer.uint32(64).int32(message.encryptionAlgorithm); + } + if (message.deletionProtection === true) { + writer.uint32(72).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricEncryptionKey { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricEncryptionKey, + } as AsymmetricEncryptionKey; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = AsymmetricEncryptionKey_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.status = reader.int32() as any; + break; + case 8: + message.encryptionAlgorithm = reader.int32() as any; + break; + case 9: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricEncryptionKey { + const message = { + ...baseAsymmetricEncryptionKey, + } as AsymmetricEncryptionKey; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.status = + object.status !== undefined && object.status !== null + ? 
asymmetricEncryptionKey_StatusFromJSON(object.status) + : 0; + message.encryptionAlgorithm = + object.encryptionAlgorithm !== undefined && + object.encryptionAlgorithm !== null + ? asymmetricEncryptionAlgorithmFromJSON(object.encryptionAlgorithm) + : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: AsymmetricEncryptionKey): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.status !== undefined && + (obj.status = asymmetricEncryptionKey_StatusToJSON(message.status)); + message.encryptionAlgorithm !== undefined && + (obj.encryptionAlgorithm = asymmetricEncryptionAlgorithmToJSON( + message.encryptionAlgorithm + )); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricEncryptionKey { + const message = { + ...baseAsymmetricEncryptionKey, + } as AsymmetricEncryptionKey; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.status = object.status ?? 
0; + message.encryptionAlgorithm = object.encryptionAlgorithm ?? 0; + message.deletionProtection = object.deletionProtection ?? false; + return message; + }, +}; + +messageTypeRegistry.set(AsymmetricEncryptionKey.$type, AsymmetricEncryptionKey); + +const baseAsymmetricEncryptionKey_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey.LabelsEntry", + key: "", + value: "", +}; + +export const AsymmetricEncryptionKey_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKey.LabelsEntry" as const, + + encode( + message: AsymmetricEncryptionKey_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricEncryptionKey_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAsymmetricEncryptionKey_LabelsEntry, + } as AsymmetricEncryptionKey_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricEncryptionKey_LabelsEntry { + const message = { + ...baseAsymmetricEncryptionKey_LabelsEntry, + } as AsymmetricEncryptionKey_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: AsymmetricEncryptionKey_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): AsymmetricEncryptionKey_LabelsEntry { + const message = { + ...baseAsymmetricEncryptionKey_LabelsEntry, + } as AsymmetricEncryptionKey_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricEncryptionKey_LabelsEntry.$type, + AsymmetricEncryptionKey_LabelsEntry +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key_service.ts b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key_service.ts new file mode 100644 index 00000000..4818538f --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key_service.ts @@ -0,0 +1,1973 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + AsymmetricEncryptionAlgorithm, + AsymmetricEncryptionKey_Status, + AsymmetricEncryptionKey, + asymmetricEncryptionAlgorithmFromJSON, + asymmetricEncryptionAlgorithmToJSON, + asymmetricEncryptionKey_StatusFromJSON, + asymmetricEncryptionKey_StatusToJSON, +} from "../../../../../yandex/cloud/kms/v1/asymmetricencryption/asymmetric_encryption_key"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { Operation } from 
"../../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../../yandex/cloud/access/access"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricencryption"; + +export interface CreateAsymmetricEncryptionKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest"; + /** ID of the folder to create a asymmetric KMS key in. */ + folderId: string; + /** Name of the key. */ + name: string; + /** Description of the key. */ + description: string; + /** + * Custom labels for the asymmetric KMS key as `key:value` pairs. Maximum 64 per key. + * For example, `"project": "mvp"` or `"source": "dictionary"`. + */ + labels: { [key: string]: string }; + /** Asymmetric encryption algorithm. */ + encryptionAlgorithm: AsymmetricEncryptionAlgorithm; + /** Flag that inhibits deletion of the symmetric KMS key */ + deletionProtection: boolean; +} + +export interface CreateAsymmetricEncryptionKeyRequest_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateAsymmetricEncryptionKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyMetadata"; + /** ID of the key being created. */ + keyId: string; +} + +export interface GetAsymmetricEncryptionKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.GetAsymmetricEncryptionKeyRequest"; + /** + * ID of the asymmetric KMS key to return. + * To get the ID of an asymmetric KMS key use a [AsymmetricEncryptionKeyService.List] request. + */ + keyId: string; +} + +export interface ListAsymmetricEncryptionKeysRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysRequest"; + /** ID of the folder to list asymmetric KMS keys in. 
*/ + folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListAsymmetricEncryptionKeysResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListAsymmetricEncryptionKeysResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListAsymmetricEncryptionKeysResponse { + $type: "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysResponse"; + /** List of asymmetric KMS keys in the specified folder. */ + keys: AsymmetricEncryptionKey[]; + /** + * This token allows you to get the next page of results for list requests. If the number + * of results is greater than the specified [ListAsymmetricEncryptionKeysRequest.page_size], use + * the [next_page_token] as the value for the [ListAsymmetricEncryptionKeysRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface UpdateAsymmetricEncryptionKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest"; + /** + * ID of the asymmetric KMS key to update. + * To get the ID of a asymmetric KMS key use a [AsymmetricEncryptionKeyService.List] request. + */ + keyId: string; + /** Field mask that specifies which attributes of the asymmetric KMS key are going to be updated. */ + updateMask?: FieldMask; + /** New name for the asymmetric KMS key. */ + name: string; + /** New description for the asymmetric KMS key. */ + description: string; + /** + * New status for the asymmetric KMS key. 
+ * Using the [AsymmetricEncryptionKeyService.Update] method you can only set ACTIVE or INACTIVE status. + */ + status: AsymmetricEncryptionKey_Status; + /** Custom labels for the asymmetric KMS key as `key:value` pairs. Maximum 64 per key. */ + labels: { [key: string]: string }; + /** Flag that inhibits deletion of the asymmetric KMS key */ + deletionProtection: boolean; +} + +export interface UpdateAsymmetricEncryptionKeyRequest_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateAsymmetricEncryptionKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyMetadata"; + /** ID of the key being updated. */ + keyId: string; +} + +export interface DeleteAsymmetricEncryptionKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyRequest"; + /** ID of the key to be deleted. */ + keyId: string; +} + +export interface DeleteAsymmetricEncryptionKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyMetadata"; + /** ID of the key being deleted. */ + keyId: string; +} + +export interface ListAsymmetricEncryptionKeyOperationsRequest { + $type: "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsRequest"; + /** + * ID of the symmetric KMS key to get operations for. + * + * To get the key ID, use a [AsymmetricKeyEncryptionService.List] request. + */ + keyId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than [page_size], the service returns a [ListAsymmetricEncryptionKeyOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the + * [ListAsymmetricKeyOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListAsymmetricEncryptionKeyOperationsResponse { + $type: "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsResponse"; + /** List of operations for the specified key. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListAsymmetricEncryptionKeyOperationsRequest.page_size], use the [next_page_token] as the value + * for the [ListAsymmetricEncryptionKeyOperationsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseCreateAsymmetricEncryptionKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest", + folderId: "", + name: "", + description: "", + encryptionAlgorithm: 0, + deletionProtection: false, +}; + +export const CreateAsymmetricEncryptionKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest" as const, + + encode( + message: CreateAsymmetricEncryptionKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateAsymmetricEncryptionKeyRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); 
+ if (message.encryptionAlgorithm !== 0) { + writer.uint32(40).int32(message.encryptionAlgorithm); + } + if (message.deletionProtection === true) { + writer.uint32(48).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricEncryptionKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest, + } as CreateAsymmetricEncryptionKeyRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = + CreateAsymmetricEncryptionKeyRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.encryptionAlgorithm = reader.int32() as any; + break; + case 6: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricEncryptionKeyRequest { + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest, + } as CreateAsymmetricEncryptionKeyRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.encryptionAlgorithm = + object.encryptionAlgorithm !== undefined && + object.encryptionAlgorithm !== null + ? asymmetricEncryptionAlgorithmFromJSON(object.encryptionAlgorithm) + : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: CreateAsymmetricEncryptionKeyRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.encryptionAlgorithm !== undefined && + (obj.encryptionAlgorithm = asymmetricEncryptionAlgorithmToJSON( + message.encryptionAlgorithm + )); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateAsymmetricEncryptionKeyRequest { + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest, + } as CreateAsymmetricEncryptionKeyRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.encryptionAlgorithm = object.encryptionAlgorithm ?? 0; + message.deletionProtection = object.deletionProtection ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricEncryptionKeyRequest.$type, + CreateAsymmetricEncryptionKeyRequest +); + +const baseCreateAsymmetricEncryptionKeyRequest_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateAsymmetricEncryptionKeyRequest_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyRequest.LabelsEntry" as const, + + encode( + message: CreateAsymmetricEncryptionKeyRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricEncryptionKeyRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as CreateAsymmetricEncryptionKeyRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricEncryptionKeyRequest_LabelsEntry { + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as CreateAsymmetricEncryptionKeyRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateAsymmetricEncryptionKeyRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): CreateAsymmetricEncryptionKeyRequest_LabelsEntry { + const message = { + ...baseCreateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as CreateAsymmetricEncryptionKeyRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricEncryptionKeyRequest_LabelsEntry.$type, + CreateAsymmetricEncryptionKeyRequest_LabelsEntry +); + +const baseCreateAsymmetricEncryptionKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyMetadata", + keyId: "", +}; + +export const CreateAsymmetricEncryptionKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.CreateAsymmetricEncryptionKeyMetadata" as const, + + encode( + message: CreateAsymmetricEncryptionKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricEncryptionKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricEncryptionKeyMetadata, + } as CreateAsymmetricEncryptionKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricEncryptionKeyMetadata { + const message = { + ...baseCreateAsymmetricEncryptionKeyMetadata, + } as CreateAsymmetricEncryptionKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: CreateAsymmetricEncryptionKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateAsymmetricEncryptionKeyMetadata { + const message = { + ...baseCreateAsymmetricEncryptionKeyMetadata, + } as CreateAsymmetricEncryptionKeyMetadata; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricEncryptionKeyMetadata.$type, + CreateAsymmetricEncryptionKeyMetadata +); + +const baseGetAsymmetricEncryptionKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.GetAsymmetricEncryptionKeyRequest", + keyId: "", +}; + +export const GetAsymmetricEncryptionKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.GetAsymmetricEncryptionKeyRequest" as const, + + encode( + message: GetAsymmetricEncryptionKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetAsymmetricEncryptionKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetAsymmetricEncryptionKeyRequest, + } as GetAsymmetricEncryptionKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetAsymmetricEncryptionKeyRequest { + const message = { + ...baseGetAsymmetricEncryptionKeyRequest, + } as GetAsymmetricEncryptionKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: GetAsymmetricEncryptionKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): GetAsymmetricEncryptionKeyRequest { + const message = { + ...baseGetAsymmetricEncryptionKeyRequest, + } as GetAsymmetricEncryptionKeyRequest; + message.keyId = object.keyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + GetAsymmetricEncryptionKeyRequest.$type, + GetAsymmetricEncryptionKeyRequest +); + +const baseListAsymmetricEncryptionKeysRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListAsymmetricEncryptionKeysRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysRequest" as const, + + encode( + message: ListAsymmetricEncryptionKeysRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricEncryptionKeysRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricEncryptionKeysRequest, + } as ListAsymmetricEncryptionKeysRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricEncryptionKeysRequest { + const message = { + ...baseListAsymmetricEncryptionKeysRequest, + } as ListAsymmetricEncryptionKeysRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? 
Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricEncryptionKeysRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListAsymmetricEncryptionKeysRequest { + const message = { + ...baseListAsymmetricEncryptionKeysRequest, + } as ListAsymmetricEncryptionKeysRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricEncryptionKeysRequest.$type, + ListAsymmetricEncryptionKeysRequest +); + +const baseListAsymmetricEncryptionKeysResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysResponse", + nextPageToken: "", +}; + +export const ListAsymmetricEncryptionKeysResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeysResponse" as const, + + encode( + message: ListAsymmetricEncryptionKeysResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.keys) { + AsymmetricEncryptionKey.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricEncryptionKeysResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricEncryptionKeysResponse, + } as ListAsymmetricEncryptionKeysResponse; + message.keys = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keys.push( + AsymmetricEncryptionKey.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricEncryptionKeysResponse { + const message = { + ...baseListAsymmetricEncryptionKeysResponse, + } as ListAsymmetricEncryptionKeysResponse; + message.keys = (object.keys ?? []).map((e: any) => + AsymmetricEncryptionKey.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricEncryptionKeysResponse): unknown { + const obj: any = {}; + if (message.keys) { + obj.keys = message.keys.map((e) => + e ? AsymmetricEncryptionKey.toJSON(e) : undefined + ); + } else { + obj.keys = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListAsymmetricEncryptionKeysResponse { + const message = { + ...baseListAsymmetricEncryptionKeysResponse, + } as ListAsymmetricEncryptionKeysResponse; + message.keys = + object.keys?.map((e) => AsymmetricEncryptionKey.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricEncryptionKeysResponse.$type, + ListAsymmetricEncryptionKeysResponse +); + +const baseUpdateAsymmetricEncryptionKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest", + keyId: "", + name: "", + description: "", + status: 0, + deletionProtection: false, +}; + +export const UpdateAsymmetricEncryptionKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest" as const, + + encode( + message: UpdateAsymmetricEncryptionKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + if (message.status !== 0) { + writer.uint32(40).int32(message.status); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateAsymmetricEncryptionKeyRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.deletionProtection === true) { + writer.uint32(56).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricEncryptionKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest, + } as UpdateAsymmetricEncryptionKeyRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.status = reader.int32() as any; + break; + case 6: + const entry6 = + UpdateAsymmetricEncryptionKeyRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricEncryptionKeyRequest { + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest, + } as UpdateAsymmetricEncryptionKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? asymmetricEncryptionKey_StatusFromJSON(object.status) + : 0; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: UpdateAsymmetricEncryptionKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.status !== undefined && + (obj.status = asymmetricEncryptionKey_StatusToJSON(message.status)); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateAsymmetricEncryptionKeyRequest { + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest, + } as UpdateAsymmetricEncryptionKeyRequest; + message.keyId = object.keyId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.status = object.status ?? 0; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.deletionProtection = object.deletionProtection ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricEncryptionKeyRequest.$type, + UpdateAsymmetricEncryptionKeyRequest +); + +const baseUpdateAsymmetricEncryptionKeyRequest_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateAsymmetricEncryptionKeyRequest_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyRequest.LabelsEntry" as const, + + encode( + message: UpdateAsymmetricEncryptionKeyRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricEncryptionKeyRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as UpdateAsymmetricEncryptionKeyRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricEncryptionKeyRequest_LabelsEntry { + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as UpdateAsymmetricEncryptionKeyRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateAsymmetricEncryptionKeyRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): UpdateAsymmetricEncryptionKeyRequest_LabelsEntry { + const message = { + ...baseUpdateAsymmetricEncryptionKeyRequest_LabelsEntry, + } as UpdateAsymmetricEncryptionKeyRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricEncryptionKeyRequest_LabelsEntry.$type, + UpdateAsymmetricEncryptionKeyRequest_LabelsEntry +); + +const baseUpdateAsymmetricEncryptionKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyMetadata", + keyId: "", +}; + +export const UpdateAsymmetricEncryptionKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.UpdateAsymmetricEncryptionKeyMetadata" as const, + + encode( + message: UpdateAsymmetricEncryptionKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricEncryptionKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricEncryptionKeyMetadata, + } as UpdateAsymmetricEncryptionKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricEncryptionKeyMetadata { + const message = { + ...baseUpdateAsymmetricEncryptionKeyMetadata, + } as UpdateAsymmetricEncryptionKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: UpdateAsymmetricEncryptionKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateAsymmetricEncryptionKeyMetadata { + const message = { + ...baseUpdateAsymmetricEncryptionKeyMetadata, + } as UpdateAsymmetricEncryptionKeyMetadata; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricEncryptionKeyMetadata.$type, + UpdateAsymmetricEncryptionKeyMetadata +); + +const baseDeleteAsymmetricEncryptionKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyRequest", + keyId: "", +}; + +export const DeleteAsymmetricEncryptionKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyRequest" as const, + + encode( + message: DeleteAsymmetricEncryptionKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteAsymmetricEncryptionKeyRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteAsymmetricEncryptionKeyRequest, + } as DeleteAsymmetricEncryptionKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAsymmetricEncryptionKeyRequest { + const message = { + ...baseDeleteAsymmetricEncryptionKeyRequest, + } as DeleteAsymmetricEncryptionKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: DeleteAsymmetricEncryptionKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteAsymmetricEncryptionKeyRequest { + const message = { + ...baseDeleteAsymmetricEncryptionKeyRequest, + } as DeleteAsymmetricEncryptionKeyRequest; + message.keyId = object.keyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteAsymmetricEncryptionKeyRequest.$type, + DeleteAsymmetricEncryptionKeyRequest +); + +const baseDeleteAsymmetricEncryptionKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyMetadata", + keyId: "", +}; + +export const DeleteAsymmetricEncryptionKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.DeleteAsymmetricEncryptionKeyMetadata" as const, + + encode( + message: DeleteAsymmetricEncryptionKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteAsymmetricEncryptionKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteAsymmetricEncryptionKeyMetadata, + } as DeleteAsymmetricEncryptionKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAsymmetricEncryptionKeyMetadata { + const message = { + ...baseDeleteAsymmetricEncryptionKeyMetadata, + } as DeleteAsymmetricEncryptionKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? 
String(object.keyId) + : ""; + return message; + }, + + toJSON(message: DeleteAsymmetricEncryptionKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteAsymmetricEncryptionKeyMetadata { + const message = { + ...baseDeleteAsymmetricEncryptionKeyMetadata, + } as DeleteAsymmetricEncryptionKeyMetadata; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteAsymmetricEncryptionKeyMetadata.$type, + DeleteAsymmetricEncryptionKeyMetadata +); + +const baseListAsymmetricEncryptionKeyOperationsRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsRequest", + keyId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListAsymmetricEncryptionKeyOperationsRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsRequest" as const, + + encode( + message: ListAsymmetricEncryptionKeyOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricEncryptionKeyOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricEncryptionKeyOperationsRequest, + } as ListAsymmetricEncryptionKeyOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricEncryptionKeyOperationsRequest { + const message = { + ...baseListAsymmetricEncryptionKeyOperationsRequest, + } as ListAsymmetricEncryptionKeyOperationsRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricEncryptionKeyOperationsRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ListAsymmetricEncryptionKeyOperationsRequest { + const message = { + ...baseListAsymmetricEncryptionKeyOperationsRequest, + } as ListAsymmetricEncryptionKeyOperationsRequest; + message.keyId = object.keyId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricEncryptionKeyOperationsRequest.$type, + ListAsymmetricEncryptionKeyOperationsRequest +); + +const baseListAsymmetricEncryptionKeyOperationsResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsResponse", + nextPageToken: "", +}; + +export const ListAsymmetricEncryptionKeyOperationsResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricencryption.ListAsymmetricEncryptionKeyOperationsResponse" as const, + + encode( + message: ListAsymmetricEncryptionKeyOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricEncryptionKeyOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricEncryptionKeyOperationsResponse, + } as ListAsymmetricEncryptionKeyOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricEncryptionKeyOperationsResponse { + const message = { + ...baseListAsymmetricEncryptionKeyOperationsResponse, + } as ListAsymmetricEncryptionKeyOperationsResponse; + message.operations = (object.operations ?? 
[]).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricEncryptionKeyOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ListAsymmetricEncryptionKeyOperationsResponse { + const message = { + ...baseListAsymmetricEncryptionKeyOperationsResponse, + } as ListAsymmetricEncryptionKeyOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricEncryptionKeyOperationsResponse.$type, + ListAsymmetricEncryptionKeyOperationsResponse +); + +/** Set of methods for managing asymmetric KMS keys. */ +export const AsymmetricEncryptionKeyServiceService = { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. + */ + create: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateAsymmetricEncryptionKeyRequest) => + Buffer.from(CreateAsymmetricEncryptionKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateAsymmetricEncryptionKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Returns the specified asymmetric KMS key. 
+ * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. + */ + get: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetAsymmetricEncryptionKeyRequest) => + Buffer.from(GetAsymmetricEncryptionKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetAsymmetricEncryptionKeyRequest.decode(value), + responseSerialize: (value: AsymmetricEncryptionKey) => + Buffer.from(AsymmetricEncryptionKey.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricEncryptionKey.decode(value), + }, + /** Returns the list of asymmetric KMS keys in the specified folder. */ + list: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAsymmetricEncryptionKeysRequest) => + Buffer.from(ListAsymmetricEncryptionKeysRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAsymmetricEncryptionKeysRequest.decode(value), + responseSerialize: (value: ListAsymmetricEncryptionKeysResponse) => + Buffer.from(ListAsymmetricEncryptionKeysResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAsymmetricEncryptionKeysResponse.decode(value), + }, + /** Updates the specified asymmetric KMS key. 
*/ + update: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAsymmetricEncryptionKeyRequest) => + Buffer.from(UpdateAsymmetricEncryptionKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAsymmetricEncryptionKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Deletes the specified asymmetric KMS key. This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. + * + * The key and its versions appear absent in [AsymmetricEncryptionKeyService.Get] and [AsymmetricEncryptionKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. + */ + delete: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteAsymmetricEncryptionKeyRequest) => + Buffer.from(DeleteAsymmetricEncryptionKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteAsymmetricEncryptionKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified asymmetric KMS key. 
*/ + listOperations: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAsymmetricEncryptionKeyOperationsRequest) => + Buffer.from( + ListAsymmetricEncryptionKeyOperationsRequest.encode(value).finish() + ), + requestDeserialize: (value: Buffer) => + ListAsymmetricEncryptionKeyOperationsRequest.decode(value), + responseSerialize: (value: ListAsymmetricEncryptionKeyOperationsResponse) => + Buffer.from( + ListAsymmetricEncryptionKeyOperationsResponse.encode(value).finish() + ), + responseDeserialize: (value: Buffer) => + ListAsymmetricEncryptionKeyOperationsResponse.decode(value), + }, + /** Lists existing access bindings for the specified key. */ + listAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for the key. 
*/ + setAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for the specified key. */ + updateAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface AsymmetricEncryptionKeyServiceServer + extends UntypedServiceImplementation { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. + */ + create: handleUnaryCall; + /** + * Returns the specified asymmetric KMS key. + * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. + */ + get: handleUnaryCall< + GetAsymmetricEncryptionKeyRequest, + AsymmetricEncryptionKey + >; + /** Returns the list of asymmetric KMS keys in the specified folder. */ + list: handleUnaryCall< + ListAsymmetricEncryptionKeysRequest, + ListAsymmetricEncryptionKeysResponse + >; + /** Updates the specified asymmetric KMS key. */ + update: handleUnaryCall; + /** + * Deletes the specified asymmetric KMS key. 
This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. + * + * The key and its versions appear absent in [AsymmetricEncryptionKeyService.Get] and [AsymmetricEncryptionKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. + */ + delete: handleUnaryCall; + /** Lists operations for the specified asymmetric KMS key. */ + listOperations: handleUnaryCall< + ListAsymmetricEncryptionKeyOperationsRequest, + ListAsymmetricEncryptionKeyOperationsResponse + >; + /** Lists existing access bindings for the specified key. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for the key. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for the specified key. */ + updateAccessBindings: handleUnaryCall; +} + +export interface AsymmetricEncryptionKeyServiceClient extends Client { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. + */ + create( + request: CreateAsymmetricEncryptionKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAsymmetricEncryptionKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAsymmetricEncryptionKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Returns the specified asymmetric KMS key. + * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. 
+ */ + get( + request: GetAsymmetricEncryptionKeyRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricEncryptionKey + ) => void + ): ClientUnaryCall; + get( + request: GetAsymmetricEncryptionKeyRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricEncryptionKey + ) => void + ): ClientUnaryCall; + get( + request: GetAsymmetricEncryptionKeyRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricEncryptionKey + ) => void + ): ClientUnaryCall; + /** Returns the list of asymmetric KMS keys in the specified folder. */ + list( + request: ListAsymmetricEncryptionKeysRequest, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListAsymmetricEncryptionKeysRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListAsymmetricEncryptionKeysRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeysResponse + ) => void + ): ClientUnaryCall; + /** Updates the specified asymmetric KMS key. */ + update( + request: UpdateAsymmetricEncryptionKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateAsymmetricEncryptionKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateAsymmetricEncryptionKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Deletes the specified asymmetric KMS key. This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. 
+ * + * The key and its versions appear absent in [AsymmetricEncryptionKeyService.Get] and [AsymmetricEncryptionKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. + */ + delete( + request: DeleteAsymmetricEncryptionKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAsymmetricEncryptionKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAsymmetricEncryptionKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified asymmetric KMS key. */ + listOperations( + request: ListAsymmetricEncryptionKeyOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeyOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListAsymmetricEncryptionKeyOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeyOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListAsymmetricEncryptionKeyOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAsymmetricEncryptionKeyOperationsResponse + ) => void + ): ClientUnaryCall; + /** Lists existing access bindings for the specified key. 
*/ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the key. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the specified key. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const AsymmetricEncryptionKeyServiceClient = + makeGenericClientConstructor( + AsymmetricEncryptionKeyServiceService, + "yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService" + ) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AsymmetricEncryptionKeyServiceClient; + service: typeof AsymmetricEncryptionKeyServiceService; + }; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_crypto_service.ts b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_crypto_service.ts new file mode 100644 index 00000000..2668051b --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_crypto_service.ts @@ -0,0 +1,811 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricsignature"; + +export interface AsymmetricSignRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignRequest"; + /** ID of the asymmetric KMS key to use for signature. */ + keyId: string; + /** + * Message to sign. + * Should be encoded with base64. + */ + message: Buffer; +} + +export interface AsymmetricSignResponse { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignResponse"; + /** ID of the asymmetric KMS key that was used for signature. */ + keyId: string; + /** + * Value of signature. + * Signature value is produced in accordance with RFC 8017 for RSA + * and is a DER-encoded object as defined by ANSI X9.62-2005 and RFC 3279 Section 2.2.3 for ECDSA. 
+ */ + signature: Buffer; +} + +export interface AsymmetricSignHashRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashRequest"; + /** ID of the asymmetric KMS key to use for signature. */ + keyId: string; + /** + * Hash value to be signed. + * Should be encoded with base64. + */ + hash: Buffer; +} + +export interface AsymmetricSignHashResponse { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashResponse"; + /** ID of the asymmetric KMS key that was used for signature. */ + keyId: string; + /** + * Value of signature. + * Signature value is produced in accordance with RFC 8017 for RSA + * and is a DER-encoded object as defined by ANSI X9.62-2005 and RFC 3279 Section 2.2.3 for ECDSA. + */ + signature: Buffer; +} + +export interface AsymmetricGetPublicKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyRequest"; + /** ID of the asymmetric KMS key to be used for public key retrieval. */ + keyId: string; +} + +export interface AsymmetricGetPublicKeyResponse { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyResponse"; + /** ID of the asymmetric KMS key to get public key of. */ + keyId: string; + /** + * Public key value. + * The value is a PEM-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), + * as defined in RFC 5280. 
+ */ + publicKey: string; +} + +const baseAsymmetricSignRequest: object = { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignRequest", + keyId: "", +}; + +export const AsymmetricSignRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignRequest" as const, + + encode( + message: AsymmetricSignRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.message.length !== 0) { + writer.uint32(18).bytes(message.message); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAsymmetricSignRequest } as AsymmetricSignRequest; + message.message = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.message = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignRequest { + const message = { ...baseAsymmetricSignRequest } as AsymmetricSignRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.message = + object.message !== undefined && object.message !== null + ? Buffer.from(bytesFromBase64(object.message)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricSignRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.message !== undefined && + (obj.message = base64FromBytes( + message.message !== undefined ? 
message.message : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricSignRequest { + const message = { ...baseAsymmetricSignRequest } as AsymmetricSignRequest; + message.keyId = object.keyId ?? ""; + message.message = object.message ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set(AsymmetricSignRequest.$type, AsymmetricSignRequest); + +const baseAsymmetricSignResponse: object = { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignResponse", + keyId: "", +}; + +export const AsymmetricSignResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignResponse" as const, + + encode( + message: AsymmetricSignResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.signature.length !== 0) { + writer.uint32(18).bytes(message.signature); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAsymmetricSignResponse } as AsymmetricSignResponse; + message.signature = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.signature = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignResponse { + const message = { ...baseAsymmetricSignResponse } as AsymmetricSignResponse; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.signature = + object.signature !== undefined && object.signature !== null + ? 
Buffer.from(bytesFromBase64(object.signature)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricSignResponse): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.signature !== undefined && + (obj.signature = base64FromBytes( + message.signature !== undefined ? message.signature : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricSignResponse { + const message = { ...baseAsymmetricSignResponse } as AsymmetricSignResponse; + message.keyId = object.keyId ?? ""; + message.signature = object.signature ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set(AsymmetricSignResponse.$type, AsymmetricSignResponse); + +const baseAsymmetricSignHashRequest: object = { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashRequest", + keyId: "", +}; + +export const AsymmetricSignHashRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashRequest" as const, + + encode( + message: AsymmetricSignHashRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.hash.length !== 0) { + writer.uint32(18).bytes(message.hash); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignHashRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricSignHashRequest, + } as AsymmetricSignHashRequest; + message.hash = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.hash = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignHashRequest { + const message = { + ...baseAsymmetricSignHashRequest, + } as AsymmetricSignHashRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.hash = + object.hash !== undefined && object.hash !== null + ? Buffer.from(bytesFromBase64(object.hash)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricSignHashRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.hash !== undefined && + (obj.hash = base64FromBytes( + message.hash !== undefined ? message.hash : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricSignHashRequest { + const message = { + ...baseAsymmetricSignHashRequest, + } as AsymmetricSignHashRequest; + message.keyId = object.keyId ?? ""; + message.hash = object.hash ?? 
Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricSignHashRequest.$type, + AsymmetricSignHashRequest +); + +const baseAsymmetricSignHashResponse: object = { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashResponse", + keyId: "", +}; + +export const AsymmetricSignHashResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignHashResponse" as const, + + encode( + message: AsymmetricSignHashResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.signature.length !== 0) { + writer.uint32(18).bytes(message.signature); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignHashResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAsymmetricSignHashResponse, + } as AsymmetricSignHashResponse; + message.signature = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.signature = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignHashResponse { + const message = { + ...baseAsymmetricSignHashResponse, + } as AsymmetricSignHashResponse; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.signature = + object.signature !== undefined && object.signature !== null + ? 
Buffer.from(bytesFromBase64(object.signature)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AsymmetricSignHashResponse): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.signature !== undefined && + (obj.signature = base64FromBytes( + message.signature !== undefined ? message.signature : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricSignHashResponse { + const message = { + ...baseAsymmetricSignHashResponse, + } as AsymmetricSignHashResponse; + message.keyId = object.keyId ?? ""; + message.signature = object.signature ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricSignHashResponse.$type, + AsymmetricSignHashResponse +); + +const baseAsymmetricGetPublicKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyRequest", + keyId: "", +}; + +export const AsymmetricGetPublicKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyRequest" as const, + + encode( + message: AsymmetricGetPublicKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricGetPublicKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricGetPublicKeyRequest { + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: AsymmetricGetPublicKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricGetPublicKeyRequest { + const message = { + ...baseAsymmetricGetPublicKeyRequest, + } as AsymmetricGetPublicKeyRequest; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricGetPublicKeyRequest.$type, + AsymmetricGetPublicKeyRequest +); + +const baseAsymmetricGetPublicKeyResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyResponse", + keyId: "", + publicKey: "", +}; + +export const AsymmetricGetPublicKeyResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricGetPublicKeyResponse" as const, + + encode( + message: AsymmetricGetPublicKeyResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.publicKey !== "") { + writer.uint32(18).string(message.publicKey); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricGetPublicKeyResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.publicKey = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricGetPublicKeyResponse { + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.publicKey = + object.publicKey !== undefined && object.publicKey !== null + ? String(object.publicKey) + : ""; + return message; + }, + + toJSON(message: AsymmetricGetPublicKeyResponse): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.publicKey !== undefined && (obj.publicKey = message.publicKey); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricGetPublicKeyResponse { + const message = { + ...baseAsymmetricGetPublicKeyResponse, + } as AsymmetricGetPublicKeyResponse; + message.keyId = object.keyId ?? ""; + message.publicKey = object.publicKey ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricGetPublicKeyResponse.$type, + AsymmetricGetPublicKeyResponse +); + +/** Set of methods that perform asymmetric signature. */ +export const AsymmetricSignatureCryptoServiceService = { + /** Signs data specified KMS key. 
*/ + sign: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureCryptoService/Sign", + requestStream: false, + responseStream: false, + requestSerialize: (value: AsymmetricSignRequest) => + Buffer.from(AsymmetricSignRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => AsymmetricSignRequest.decode(value), + responseSerialize: (value: AsymmetricSignResponse) => + Buffer.from(AsymmetricSignResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricSignResponse.decode(value), + }, + /** Signs hash value specified KMS key. */ + signHash: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureCryptoService/SignHash", + requestStream: false, + responseStream: false, + requestSerialize: (value: AsymmetricSignHashRequest) => + Buffer.from(AsymmetricSignHashRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AsymmetricSignHashRequest.decode(value), + responseSerialize: (value: AsymmetricSignHashResponse) => + Buffer.from(AsymmetricSignHashResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricSignHashResponse.decode(value), + }, + /** Gets value of public key. */ + getPublicKey: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureCryptoService/GetPublicKey", + requestStream: false, + responseStream: false, + requestSerialize: (value: AsymmetricGetPublicKeyRequest) => + Buffer.from(AsymmetricGetPublicKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AsymmetricGetPublicKeyRequest.decode(value), + responseSerialize: (value: AsymmetricGetPublicKeyResponse) => + Buffer.from(AsymmetricGetPublicKeyResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricGetPublicKeyResponse.decode(value), + }, +} as const; + +export interface AsymmetricSignatureCryptoServiceServer + extends UntypedServiceImplementation { + /** Signs data specified KMS key. 
*/ + sign: handleUnaryCall; + /** Signs hash value specified KMS key. */ + signHash: handleUnaryCall< + AsymmetricSignHashRequest, + AsymmetricSignHashResponse + >; + /** Gets value of public key. */ + getPublicKey: handleUnaryCall< + AsymmetricGetPublicKeyRequest, + AsymmetricGetPublicKeyResponse + >; +} + +export interface AsymmetricSignatureCryptoServiceClient extends Client { + /** Signs data specified KMS key. */ + sign( + request: AsymmetricSignRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricSignResponse + ) => void + ): ClientUnaryCall; + sign( + request: AsymmetricSignRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricSignResponse + ) => void + ): ClientUnaryCall; + sign( + request: AsymmetricSignRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricSignResponse + ) => void + ): ClientUnaryCall; + /** Signs hash value specified KMS key. */ + signHash( + request: AsymmetricSignHashRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricSignHashResponse + ) => void + ): ClientUnaryCall; + signHash( + request: AsymmetricSignHashRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricSignHashResponse + ) => void + ): ClientUnaryCall; + signHash( + request: AsymmetricSignHashRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricSignHashResponse + ) => void + ): ClientUnaryCall; + /** Gets value of public key. 
*/ + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; + getPublicKey( + request: AsymmetricGetPublicKeyRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricGetPublicKeyResponse + ) => void + ): ClientUnaryCall; +} + +export const AsymmetricSignatureCryptoServiceClient = + makeGenericClientConstructor( + AsymmetricSignatureCryptoServiceService, + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureCryptoService" + ) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AsymmetricSignatureCryptoServiceClient; + service: typeof AsymmetricSignatureCryptoServiceService; + }; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for 
(const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key.ts b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key.ts new file mode 100644 index 00000000..b971a98f --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key.ts @@ -0,0 +1,565 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricsignature"; + +/** Supported asymmetric signature algorithms. 
*/ +export enum AsymmetricSignatureAlgorithm { + ASYMMETRIC_SIGNATURE_ALGORITHM_UNSPECIFIED = 0, + /** RSA_2048_SIGN_PSS_SHA_256 - RSA-2048 signature with PSS padding and SHA-256 */ + RSA_2048_SIGN_PSS_SHA_256 = 1, + /** RSA_2048_SIGN_PSS_SHA_384 - RSA-2048 signature with PSS padding and SHA-384 */ + RSA_2048_SIGN_PSS_SHA_384 = 2, + /** RSA_2048_SIGN_PSS_SHA_512 - RSA-2048 signature with PSS padding and SHA-512 */ + RSA_2048_SIGN_PSS_SHA_512 = 3, + /** RSA_3072_SIGN_PSS_SHA_256 - RSA-3072 signature with PSS padding and SHA-256 */ + RSA_3072_SIGN_PSS_SHA_256 = 4, + /** RSA_3072_SIGN_PSS_SHA_384 - RSA-3072 signature with PSS padding and SHA-384 */ + RSA_3072_SIGN_PSS_SHA_384 = 5, + /** RSA_3072_SIGN_PSS_SHA_512 - RSA-3072 signature with PSS padding and SHA-512 */ + RSA_3072_SIGN_PSS_SHA_512 = 6, + /** RSA_4096_SIGN_PSS_SHA_256 - RSA-4096 signature with PSS padding and SHA-256 */ + RSA_4096_SIGN_PSS_SHA_256 = 7, + /** RSA_4096_SIGN_PSS_SHA_384 - RSA-4096 signature with PSS padding and SHA-384 */ + RSA_4096_SIGN_PSS_SHA_384 = 8, + /** RSA_4096_SIGN_PSS_SHA_512 - RSA-4096 signature with PSS padding and SHA-512 */ + RSA_4096_SIGN_PSS_SHA_512 = 9, + /** ECDSA_NIST_P256_SHA_256 - ECDSA signature with NIST P-256 curve and SHA-256 */ + ECDSA_NIST_P256_SHA_256 = 10, + /** ECDSA_NIST_P384_SHA_384 - ECDSA signature with NIST P-384 curve and SHA-384 */ + ECDSA_NIST_P384_SHA_384 = 11, + /** ECDSA_NIST_P521_SHA_512 - ECDSA signature with NIST P-521 curve and SHA-512 */ + ECDSA_NIST_P521_SHA_512 = 12, + /** ECDSA_SECP256_K1_SHA_256 - ECDSA signature with SECP256_K1 curve and SHA-256 */ + ECDSA_SECP256_K1_SHA_256 = 13, + UNRECOGNIZED = -1, +} + +export function asymmetricSignatureAlgorithmFromJSON( + object: any +): AsymmetricSignatureAlgorithm { + switch (object) { + case 0: + case "ASYMMETRIC_SIGNATURE_ALGORITHM_UNSPECIFIED": + return AsymmetricSignatureAlgorithm.ASYMMETRIC_SIGNATURE_ALGORITHM_UNSPECIFIED; + case 1: + case "RSA_2048_SIGN_PSS_SHA_256": + return 
AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_256; + case 2: + case "RSA_2048_SIGN_PSS_SHA_384": + return AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_384; + case 3: + case "RSA_2048_SIGN_PSS_SHA_512": + return AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_512; + case 4: + case "RSA_3072_SIGN_PSS_SHA_256": + return AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_256; + case 5: + case "RSA_3072_SIGN_PSS_SHA_384": + return AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_384; + case 6: + case "RSA_3072_SIGN_PSS_SHA_512": + return AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_512; + case 7: + case "RSA_4096_SIGN_PSS_SHA_256": + return AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_256; + case 8: + case "RSA_4096_SIGN_PSS_SHA_384": + return AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_384; + case 9: + case "RSA_4096_SIGN_PSS_SHA_512": + return AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_512; + case 10: + case "ECDSA_NIST_P256_SHA_256": + return AsymmetricSignatureAlgorithm.ECDSA_NIST_P256_SHA_256; + case 11: + case "ECDSA_NIST_P384_SHA_384": + return AsymmetricSignatureAlgorithm.ECDSA_NIST_P384_SHA_384; + case 12: + case "ECDSA_NIST_P521_SHA_512": + return AsymmetricSignatureAlgorithm.ECDSA_NIST_P521_SHA_512; + case 13: + case "ECDSA_SECP256_K1_SHA_256": + return AsymmetricSignatureAlgorithm.ECDSA_SECP256_K1_SHA_256; + case -1: + case "UNRECOGNIZED": + default: + return AsymmetricSignatureAlgorithm.UNRECOGNIZED; + } +} + +export function asymmetricSignatureAlgorithmToJSON( + object: AsymmetricSignatureAlgorithm +): string { + switch (object) { + case AsymmetricSignatureAlgorithm.ASYMMETRIC_SIGNATURE_ALGORITHM_UNSPECIFIED: + return "ASYMMETRIC_SIGNATURE_ALGORITHM_UNSPECIFIED"; + case AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_256: + return "RSA_2048_SIGN_PSS_SHA_256"; + case AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_384: + return "RSA_2048_SIGN_PSS_SHA_384"; + case 
AsymmetricSignatureAlgorithm.RSA_2048_SIGN_PSS_SHA_512: + return "RSA_2048_SIGN_PSS_SHA_512"; + case AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_256: + return "RSA_3072_SIGN_PSS_SHA_256"; + case AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_384: + return "RSA_3072_SIGN_PSS_SHA_384"; + case AsymmetricSignatureAlgorithm.RSA_3072_SIGN_PSS_SHA_512: + return "RSA_3072_SIGN_PSS_SHA_512"; + case AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_256: + return "RSA_4096_SIGN_PSS_SHA_256"; + case AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_384: + return "RSA_4096_SIGN_PSS_SHA_384"; + case AsymmetricSignatureAlgorithm.RSA_4096_SIGN_PSS_SHA_512: + return "RSA_4096_SIGN_PSS_SHA_512"; + case AsymmetricSignatureAlgorithm.ECDSA_NIST_P256_SHA_256: + return "ECDSA_NIST_P256_SHA_256"; + case AsymmetricSignatureAlgorithm.ECDSA_NIST_P384_SHA_384: + return "ECDSA_NIST_P384_SHA_384"; + case AsymmetricSignatureAlgorithm.ECDSA_NIST_P521_SHA_512: + return "ECDSA_NIST_P521_SHA_512"; + case AsymmetricSignatureAlgorithm.ECDSA_SECP256_K1_SHA_256: + return "ECDSA_SECP256_K1_SHA_256"; + default: + return "UNKNOWN"; + } +} + +/** An asymmetric KMS key that may contain several versions of the cryptographic material. */ +export interface AsymmetricSignatureKey { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey"; + /** ID of the key. */ + id: string; + /** ID of the folder that the key belongs to. */ + folderId: string; + /** Time when the key was created. */ + createdAt?: Date; + /** Name of the key. */ + name: string; + /** Description of the key. */ + description: string; + /** Custom labels for the key as `key:value` pairs. Maximum 64 per key. */ + labels: { [key: string]: string }; + /** Current status of the key. */ + status: AsymmetricSignatureKey_Status; + /** Signature Algorithm ID. 
*/ + signatureAlgorithm: AsymmetricSignatureAlgorithm; + /** Flag that inhibits deletion of the key */ + deletionProtection: boolean; +} + +export enum AsymmetricSignatureKey_Status { + STATUS_UNSPECIFIED = 0, + /** CREATING - The key is being created. */ + CREATING = 1, + /** + * ACTIVE - The key is active and can be used for encryption and decryption or signature and verification. + * Can be set to INACTIVE using the [AsymmetricKeyService.Update] method. + */ + ACTIVE = 2, + /** + * INACTIVE - The key is inactive and unusable. + * Can be set to ACTIVE using the [AsymmetricKeyService.Update] method. + */ + INACTIVE = 3, + UNRECOGNIZED = -1, +} + +export function asymmetricSignatureKey_StatusFromJSON( + object: any +): AsymmetricSignatureKey_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return AsymmetricSignatureKey_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATING": + return AsymmetricSignatureKey_Status.CREATING; + case 2: + case "ACTIVE": + return AsymmetricSignatureKey_Status.ACTIVE; + case 3: + case "INACTIVE": + return AsymmetricSignatureKey_Status.INACTIVE; + case -1: + case "UNRECOGNIZED": + default: + return AsymmetricSignatureKey_Status.UNRECOGNIZED; + } +} + +export function asymmetricSignatureKey_StatusToJSON( + object: AsymmetricSignatureKey_Status +): string { + switch (object) { + case AsymmetricSignatureKey_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case AsymmetricSignatureKey_Status.CREATING: + return "CREATING"; + case AsymmetricSignatureKey_Status.ACTIVE: + return "ACTIVE"; + case AsymmetricSignatureKey_Status.INACTIVE: + return "INACTIVE"; + default: + return "UNKNOWN"; + } +} + +export interface AsymmetricSignatureKey_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey.LabelsEntry"; + key: string; + value: string; +} + +const baseAsymmetricSignatureKey: object = { + $type: "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey", + id: "", + folderId: "", + 
name: "", + description: "", + status: 0, + signatureAlgorithm: 0, + deletionProtection: false, +}; + +export const AsymmetricSignatureKey = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey" as const, + + encode( + message: AsymmetricSignatureKey, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + AsymmetricSignatureKey_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + if (message.signatureAlgorithm !== 0) { + writer.uint32(64).int32(message.signatureAlgorithm); + } + if (message.deletionProtection === true) { + writer.uint32(72).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignatureKey { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAsymmetricSignatureKey } as AsymmetricSignatureKey; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = AsymmetricSignatureKey_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.status = reader.int32() as any; + break; + case 8: + message.signatureAlgorithm = reader.int32() as any; + break; + case 9: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignatureKey { + const message = { ...baseAsymmetricSignatureKey } as AsymmetricSignatureKey; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.status = + object.status !== undefined && object.status !== null + ? 
asymmetricSignatureKey_StatusFromJSON(object.status) + : 0; + message.signatureAlgorithm = + object.signatureAlgorithm !== undefined && + object.signatureAlgorithm !== null + ? asymmetricSignatureAlgorithmFromJSON(object.signatureAlgorithm) + : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: AsymmetricSignatureKey): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.status !== undefined && + (obj.status = asymmetricSignatureKey_StatusToJSON(message.status)); + message.signatureAlgorithm !== undefined && + (obj.signatureAlgorithm = asymmetricSignatureAlgorithmToJSON( + message.signatureAlgorithm + )); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsymmetricSignatureKey { + const message = { ...baseAsymmetricSignatureKey } as AsymmetricSignatureKey; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.status = object.status ?? 
0; + message.signatureAlgorithm = object.signatureAlgorithm ?? 0; + message.deletionProtection = object.deletionProtection ?? false; + return message; + }, +}; + +messageTypeRegistry.set(AsymmetricSignatureKey.$type, AsymmetricSignatureKey); + +const baseAsymmetricSignatureKey_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey.LabelsEntry", + key: "", + value: "", +}; + +export const AsymmetricSignatureKey_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKey.LabelsEntry" as const, + + encode( + message: AsymmetricSignatureKey_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsymmetricSignatureKey_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAsymmetricSignatureKey_LabelsEntry, + } as AsymmetricSignatureKey_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsymmetricSignatureKey_LabelsEntry { + const message = { + ...baseAsymmetricSignatureKey_LabelsEntry, + } as AsymmetricSignatureKey_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: AsymmetricSignatureKey_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): AsymmetricSignatureKey_LabelsEntry { + const message = { + ...baseAsymmetricSignatureKey_LabelsEntry, + } as AsymmetricSignatureKey_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AsymmetricSignatureKey_LabelsEntry.$type, + AsymmetricSignatureKey_LabelsEntry +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key_service.ts b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key_service.ts new file mode 100644 index 00000000..c21cd1d3 --- /dev/null +++ b/src/generated/yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key_service.ts @@ -0,0 +1,1967 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + AsymmetricSignatureAlgorithm, + AsymmetricSignatureKey_Status, + AsymmetricSignatureKey, + asymmetricSignatureAlgorithmFromJSON, + asymmetricSignatureAlgorithmToJSON, + asymmetricSignatureKey_StatusFromJSON, + asymmetricSignatureKey_StatusToJSON, +} from "../../../../../yandex/cloud/kms/v1/asymmetricsignature/asymmetric_signature_key"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; +import { 
+ ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../../yandex/cloud/access/access"; + +export const protobufPackage = "yandex.cloud.kms.v1.asymmetricsignature"; + +export interface CreateAsymmetricSignatureKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest"; + /** ID of the folder to create a asymmetric KMS key in. */ + folderId: string; + /** Name of the key. */ + name: string; + /** Description of the key. */ + description: string; + /** + * Custom labels for the asymmetric KMS key as `key:value` pairs. Maximum 64 per key. + * For example, `"project": "mvp"` or `"source": "dictionary"`. + */ + labels: { [key: string]: string }; + /** Asymmetric signature algorithm. */ + signatureAlgorithm: AsymmetricSignatureAlgorithm; + /** Flag that inhibits deletion of the symmetric KMS key */ + deletionProtection: boolean; +} + +export interface CreateAsymmetricSignatureKeyRequest_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateAsymmetricSignatureKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyMetadata"; + /** ID of the key being created. */ + keyId: string; +} + +export interface GetAsymmetricSignatureKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.GetAsymmetricSignatureKeyRequest"; + /** + * ID of the asymmetric KMS key to return. + * To get the ID of an asymmetric KMS key use a [AsymmetricSignatureKeyService.List] request. + */ + keyId: string; +} + +export interface ListAsymmetricSignatureKeysRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysRequest"; + /** ID of the folder to list asymmetric KMS keys in. */ + folderId: string; + /** + * The maximum number of results per page to return. 
If the number of available + * results is larger than [page_size], the service returns a [ListAsymmetricSignatureKeysResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListAsymmetricSignatureKeysResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListAsymmetricSignatureKeysResponse { + $type: "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysResponse"; + /** List of asymmetric KMS keys in the specified folder. */ + keys: AsymmetricSignatureKey[]; + /** + * This token allows you to get the next page of results for list requests. If the number + * of results is greater than the specified [ListAsymmetricSignatureKeysRequest.page_size], use + * the [next_page_token] as the value for the [ListAsymmetricSignatureKeysRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface UpdateAsymmetricSignatureKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest"; + /** + * ID of the asymmetric KMS key to update. + * To get the ID of a asymmetric KMS key use a [AsymmetricSignatureKeyService.List] request. + */ + keyId: string; + /** Field mask that specifies which attributes of the asymmetric KMS key are going to be updated. */ + updateMask?: FieldMask; + /** New name for the asymmetric KMS key. */ + name: string; + /** New description for the asymmetric KMS key. */ + description: string; + /** + * New status for the asymmetric KMS key. + * Using the [AsymmetricSignatureKeyService.Update] method you can only set ACTIVE or INACTIVE status. 
+ */ + status: AsymmetricSignatureKey_Status; + /** Custom labels for the asymmetric KMS key as `key:value` pairs. Maximum 64 per key. */ + labels: { [key: string]: string }; + /** Flag that inhibits deletion of the asymmetric KMS key */ + deletionProtection: boolean; +} + +export interface UpdateAsymmetricSignatureKeyRequest_LabelsEntry { + $type: "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateAsymmetricSignatureKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyMetadata"; + /** ID of the key being updated. */ + keyId: string; +} + +export interface DeleteAsymmetricSignatureKeyRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyRequest"; + /** ID of the key to be deleted. */ + keyId: string; +} + +export interface DeleteAsymmetricSignatureKeyMetadata { + $type: "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyMetadata"; + /** ID of the key being deleted. */ + keyId: string; +} + +export interface ListAsymmetricSignatureKeyOperationsRequest { + $type: "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsRequest"; + /** + * ID of the symmetric KMS key to get operations for. + * + * To get the key ID, use a [AsymmetricSignatureKeyService.List] request. + */ + keyId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than [page_size], the service returns a [ListAsymmetricSignatureKeyOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListAsymmetricSignatureKeyOperationsResponse.next_page_token] returned by a previous list request. 
+ */ + pageToken: string; +} + +export interface ListAsymmetricSignatureKeyOperationsResponse { + $type: "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsResponse"; + /** List of operations for the specified key. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListAsymmetricSignatureKeyOperationsRequest.page_size], use the [next_page_token] as the value + * for the [ListAsymmetricSignatureKeyOperationsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseCreateAsymmetricSignatureKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest", + folderId: "", + name: "", + description: "", + signatureAlgorithm: 0, + deletionProtection: false, +}; + +export const CreateAsymmetricSignatureKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest" as const, + + encode( + message: CreateAsymmetricSignatureKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateAsymmetricSignatureKeyRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.signatureAlgorithm !== 0) { + writer.uint32(40).int32(message.signatureAlgorithm); + } + if (message.deletionProtection === true) { + 
writer.uint32(48).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricSignatureKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricSignatureKeyRequest, + } as CreateAsymmetricSignatureKeyRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateAsymmetricSignatureKeyRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.signatureAlgorithm = reader.int32() as any; + break; + case 6: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricSignatureKeyRequest { + const message = { + ...baseCreateAsymmetricSignatureKeyRequest, + } as CreateAsymmetricSignatureKeyRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.signatureAlgorithm = + object.signatureAlgorithm !== undefined && + object.signatureAlgorithm !== null + ? 
asymmetricSignatureAlgorithmFromJSON(object.signatureAlgorithm) + : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: CreateAsymmetricSignatureKeyRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.signatureAlgorithm !== undefined && + (obj.signatureAlgorithm = asymmetricSignatureAlgorithmToJSON( + message.signatureAlgorithm + )); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateAsymmetricSignatureKeyRequest { + const message = { + ...baseCreateAsymmetricSignatureKeyRequest, + } as CreateAsymmetricSignatureKeyRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.signatureAlgorithm = object.signatureAlgorithm ?? 0; + message.deletionProtection = object.deletionProtection ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricSignatureKeyRequest.$type, + CreateAsymmetricSignatureKeyRequest +); + +const baseCreateAsymmetricSignatureKeyRequest_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateAsymmetricSignatureKeyRequest_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyRequest.LabelsEntry" as const, + + encode( + message: CreateAsymmetricSignatureKeyRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricSignatureKeyRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricSignatureKeyRequest_LabelsEntry, + } as CreateAsymmetricSignatureKeyRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricSignatureKeyRequest_LabelsEntry { + const message = { + ...baseCreateAsymmetricSignatureKeyRequest_LabelsEntry, + } as CreateAsymmetricSignatureKeyRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateAsymmetricSignatureKeyRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): CreateAsymmetricSignatureKeyRequest_LabelsEntry { + const message = { + ...baseCreateAsymmetricSignatureKeyRequest_LabelsEntry, + } as CreateAsymmetricSignatureKeyRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricSignatureKeyRequest_LabelsEntry.$type, + CreateAsymmetricSignatureKeyRequest_LabelsEntry +); + +const baseCreateAsymmetricSignatureKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyMetadata", + keyId: "", +}; + +export const CreateAsymmetricSignatureKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.CreateAsymmetricSignatureKeyMetadata" as const, + + encode( + message: CreateAsymmetricSignatureKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateAsymmetricSignatureKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateAsymmetricSignatureKeyMetadata, + } as CreateAsymmetricSignatureKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAsymmetricSignatureKeyMetadata { + const message = { + ...baseCreateAsymmetricSignatureKeyMetadata, + } as CreateAsymmetricSignatureKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: CreateAsymmetricSignatureKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateAsymmetricSignatureKeyMetadata { + const message = { + ...baseCreateAsymmetricSignatureKeyMetadata, + } as CreateAsymmetricSignatureKeyMetadata; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateAsymmetricSignatureKeyMetadata.$type, + CreateAsymmetricSignatureKeyMetadata +); + +const baseGetAsymmetricSignatureKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.GetAsymmetricSignatureKeyRequest", + keyId: "", +}; + +export const GetAsymmetricSignatureKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.GetAsymmetricSignatureKeyRequest" as const, + + encode( + message: GetAsymmetricSignatureKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetAsymmetricSignatureKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetAsymmetricSignatureKeyRequest, + } as GetAsymmetricSignatureKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetAsymmetricSignatureKeyRequest { + const message = { + ...baseGetAsymmetricSignatureKeyRequest, + } as GetAsymmetricSignatureKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: GetAsymmetricSignatureKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): GetAsymmetricSignatureKeyRequest { + const message = { + ...baseGetAsymmetricSignatureKeyRequest, + } as GetAsymmetricSignatureKeyRequest; + message.keyId = object.keyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + GetAsymmetricSignatureKeyRequest.$type, + GetAsymmetricSignatureKeyRequest +); + +const baseListAsymmetricSignatureKeysRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListAsymmetricSignatureKeysRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysRequest" as const, + + encode( + message: ListAsymmetricSignatureKeysRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricSignatureKeysRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricSignatureKeysRequest, + } as ListAsymmetricSignatureKeysRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricSignatureKeysRequest { + const message = { + ...baseListAsymmetricSignatureKeysRequest, + } as ListAsymmetricSignatureKeysRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? 
Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricSignatureKeysRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListAsymmetricSignatureKeysRequest { + const message = { + ...baseListAsymmetricSignatureKeysRequest, + } as ListAsymmetricSignatureKeysRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricSignatureKeysRequest.$type, + ListAsymmetricSignatureKeysRequest +); + +const baseListAsymmetricSignatureKeysResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysResponse", + nextPageToken: "", +}; + +export const ListAsymmetricSignatureKeysResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeysResponse" as const, + + encode( + message: ListAsymmetricSignatureKeysResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.keys) { + AsymmetricSignatureKey.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricSignatureKeysResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricSignatureKeysResponse, + } as ListAsymmetricSignatureKeysResponse; + message.keys = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keys.push( + AsymmetricSignatureKey.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricSignatureKeysResponse { + const message = { + ...baseListAsymmetricSignatureKeysResponse, + } as ListAsymmetricSignatureKeysResponse; + message.keys = (object.keys ?? []).map((e: any) => + AsymmetricSignatureKey.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricSignatureKeysResponse): unknown { + const obj: any = {}; + if (message.keys) { + obj.keys = message.keys.map((e) => + e ? AsymmetricSignatureKey.toJSON(e) : undefined + ); + } else { + obj.keys = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListAsymmetricSignatureKeysResponse { + const message = { + ...baseListAsymmetricSignatureKeysResponse, + } as ListAsymmetricSignatureKeysResponse; + message.keys = + object.keys?.map((e) => AsymmetricSignatureKey.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricSignatureKeysResponse.$type, + ListAsymmetricSignatureKeysResponse +); + +const baseUpdateAsymmetricSignatureKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest", + keyId: "", + name: "", + description: "", + status: 0, + deletionProtection: false, +}; + +export const UpdateAsymmetricSignatureKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest" as const, + + encode( + message: UpdateAsymmetricSignatureKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + if (message.status !== 0) { + writer.uint32(40).int32(message.status); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateAsymmetricSignatureKeyRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.deletionProtection === true) { + writer.uint32(56).bool(message.deletionProtection); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricSignatureKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest, + } as UpdateAsymmetricSignatureKeyRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.status = reader.int32() as any; + break; + case 6: + const entry6 = UpdateAsymmetricSignatureKeyRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.deletionProtection = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricSignatureKeyRequest { + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest, + } as UpdateAsymmetricSignatureKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? asymmetricSignatureKey_StatusFromJSON(object.status) + : 0; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : false; + return message; + }, + + toJSON(message: UpdateAsymmetricSignatureKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.status !== undefined && + (obj.status = asymmetricSignatureKey_StatusToJSON(message.status)); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateAsymmetricSignatureKeyRequest { + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest, + } as UpdateAsymmetricSignatureKeyRequest; + message.keyId = object.keyId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.status = object.status ?? 0; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.deletionProtection = object.deletionProtection ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricSignatureKeyRequest.$type, + UpdateAsymmetricSignatureKeyRequest +); + +const baseUpdateAsymmetricSignatureKeyRequest_LabelsEntry: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateAsymmetricSignatureKeyRequest_LabelsEntry = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyRequest.LabelsEntry" as const, + + encode( + message: UpdateAsymmetricSignatureKeyRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricSignatureKeyRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest_LabelsEntry, + } as UpdateAsymmetricSignatureKeyRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricSignatureKeyRequest_LabelsEntry { + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest_LabelsEntry, + } as UpdateAsymmetricSignatureKeyRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateAsymmetricSignatureKeyRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): UpdateAsymmetricSignatureKeyRequest_LabelsEntry { + const message = { + ...baseUpdateAsymmetricSignatureKeyRequest_LabelsEntry, + } as UpdateAsymmetricSignatureKeyRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricSignatureKeyRequest_LabelsEntry.$type, + UpdateAsymmetricSignatureKeyRequest_LabelsEntry +); + +const baseUpdateAsymmetricSignatureKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyMetadata", + keyId: "", +}; + +export const UpdateAsymmetricSignatureKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.UpdateAsymmetricSignatureKeyMetadata" as const, + + encode( + message: UpdateAsymmetricSignatureKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAsymmetricSignatureKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateAsymmetricSignatureKeyMetadata, + } as UpdateAsymmetricSignatureKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAsymmetricSignatureKeyMetadata { + const message = { + ...baseUpdateAsymmetricSignatureKeyMetadata, + } as UpdateAsymmetricSignatureKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: UpdateAsymmetricSignatureKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateAsymmetricSignatureKeyMetadata { + const message = { + ...baseUpdateAsymmetricSignatureKeyMetadata, + } as UpdateAsymmetricSignatureKeyMetadata; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAsymmetricSignatureKeyMetadata.$type, + UpdateAsymmetricSignatureKeyMetadata +); + +const baseDeleteAsymmetricSignatureKeyRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyRequest", + keyId: "", +}; + +export const DeleteAsymmetricSignatureKeyRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyRequest" as const, + + encode( + message: DeleteAsymmetricSignatureKeyRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteAsymmetricSignatureKeyRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteAsymmetricSignatureKeyRequest, + } as DeleteAsymmetricSignatureKeyRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAsymmetricSignatureKeyRequest { + const message = { + ...baseDeleteAsymmetricSignatureKeyRequest, + } as DeleteAsymmetricSignatureKeyRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: DeleteAsymmetricSignatureKeyRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteAsymmetricSignatureKeyRequest { + const message = { + ...baseDeleteAsymmetricSignatureKeyRequest, + } as DeleteAsymmetricSignatureKeyRequest; + message.keyId = object.keyId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteAsymmetricSignatureKeyRequest.$type, + DeleteAsymmetricSignatureKeyRequest +); + +const baseDeleteAsymmetricSignatureKeyMetadata: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyMetadata", + keyId: "", +}; + +export const DeleteAsymmetricSignatureKeyMetadata = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.DeleteAsymmetricSignatureKeyMetadata" as const, + + encode( + message: DeleteAsymmetricSignatureKeyMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteAsymmetricSignatureKeyMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteAsymmetricSignatureKeyMetadata, + } as DeleteAsymmetricSignatureKeyMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAsymmetricSignatureKeyMetadata { + const message = { + ...baseDeleteAsymmetricSignatureKeyMetadata, + } as DeleteAsymmetricSignatureKeyMetadata; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? String(object.keyId) + : ""; + return message; + }, + + toJSON(message: DeleteAsymmetricSignatureKeyMetadata): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteAsymmetricSignatureKeyMetadata { + const message = { + ...baseDeleteAsymmetricSignatureKeyMetadata, + } as DeleteAsymmetricSignatureKeyMetadata; + message.keyId = object.keyId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteAsymmetricSignatureKeyMetadata.$type, + DeleteAsymmetricSignatureKeyMetadata +); + +const baseListAsymmetricSignatureKeyOperationsRequest: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsRequest", + keyId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListAsymmetricSignatureKeyOperationsRequest = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsRequest" as const, + + encode( + message: ListAsymmetricSignatureKeyOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.keyId !== "") { + writer.uint32(10).string(message.keyId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricSignatureKeyOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricSignatureKeyOperationsRequest, + } as ListAsymmetricSignatureKeyOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricSignatureKeyOperationsRequest { + const message = { + ...baseListAsymmetricSignatureKeyOperationsRequest, + } as ListAsymmetricSignatureKeyOperationsRequest; + message.keyId = + object.keyId !== undefined && object.keyId !== null + ? 
String(object.keyId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricSignatureKeyOperationsRequest): unknown { + const obj: any = {}; + message.keyId !== undefined && (obj.keyId = message.keyId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListAsymmetricSignatureKeyOperationsRequest { + const message = { + ...baseListAsymmetricSignatureKeyOperationsRequest, + } as ListAsymmetricSignatureKeyOperationsRequest; + message.keyId = object.keyId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricSignatureKeyOperationsRequest.$type, + ListAsymmetricSignatureKeyOperationsRequest +); + +const baseListAsymmetricSignatureKeyOperationsResponse: object = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsResponse", + nextPageToken: "", +}; + +export const ListAsymmetricSignatureKeyOperationsResponse = { + $type: + "yandex.cloud.kms.v1.asymmetricsignature.ListAsymmetricSignatureKeyOperationsResponse" as const, + + encode( + message: ListAsymmetricSignatureKeyOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListAsymmetricSignatureKeyOperationsResponse { + const reader = input 
instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListAsymmetricSignatureKeyOperationsResponse, + } as ListAsymmetricSignatureKeyOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAsymmetricSignatureKeyOperationsResponse { + const message = { + ...baseListAsymmetricSignatureKeyOperationsResponse, + } as ListAsymmetricSignatureKeyOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListAsymmetricSignatureKeyOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ListAsymmetricSignatureKeyOperationsResponse { + const message = { + ...baseListAsymmetricSignatureKeyOperationsResponse, + } as ListAsymmetricSignatureKeyOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListAsymmetricSignatureKeyOperationsResponse.$type, + ListAsymmetricSignatureKeyOperationsResponse +); + +/** Set of methods for managing asymmetric signature keys. */ +export const AsymmetricSignatureKeyServiceService = { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. + */ + create: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateAsymmetricSignatureKeyRequest) => + Buffer.from(CreateAsymmetricSignatureKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateAsymmetricSignatureKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Returns the specified asymmetric KMS key. + * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. + */ + get: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetAsymmetricSignatureKeyRequest) => + Buffer.from(GetAsymmetricSignatureKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetAsymmetricSignatureKeyRequest.decode(value), + responseSerialize: (value: AsymmetricSignatureKey) => + Buffer.from(AsymmetricSignatureKey.encode(value).finish()), + responseDeserialize: (value: Buffer) => + AsymmetricSignatureKey.decode(value), + }, + /** Returns the list of asymmetric KMS keys in the specified folder. 
*/ + list: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAsymmetricSignatureKeysRequest) => + Buffer.from(ListAsymmetricSignatureKeysRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAsymmetricSignatureKeysRequest.decode(value), + responseSerialize: (value: ListAsymmetricSignatureKeysResponse) => + Buffer.from(ListAsymmetricSignatureKeysResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAsymmetricSignatureKeysResponse.decode(value), + }, + /** Updates the specified asymmetric KMS key. */ + update: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAsymmetricSignatureKeyRequest) => + Buffer.from(UpdateAsymmetricSignatureKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAsymmetricSignatureKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Deletes the specified asymmetric KMS key. This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. + * + * The key and its versions appear absent in [AsymmetricSignatureKeyService.Get] and [AsymmetricSignatureKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. 
+ */ + delete: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteAsymmetricSignatureKeyRequest) => + Buffer.from(DeleteAsymmetricSignatureKeyRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteAsymmetricSignatureKeyRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified asymmetric KMS key. */ + listOperations: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAsymmetricSignatureKeyOperationsRequest) => + Buffer.from( + ListAsymmetricSignatureKeyOperationsRequest.encode(value).finish() + ), + requestDeserialize: (value: Buffer) => + ListAsymmetricSignatureKeyOperationsRequest.decode(value), + responseSerialize: (value: ListAsymmetricSignatureKeyOperationsResponse) => + Buffer.from( + ListAsymmetricSignatureKeyOperationsResponse.encode(value).finish() + ), + responseDeserialize: (value: Buffer) => + ListAsymmetricSignatureKeyOperationsResponse.decode(value), + }, + /** Lists existing access bindings for the specified key. 
*/ + listAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for the key. */ + setAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for the specified key. */ + updateAccessBindings: { + path: "/yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface AsymmetricSignatureKeyServiceServer + extends UntypedServiceImplementation { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. 
+ */ + create: handleUnaryCall; + /** + * Returns the specified asymmetric KMS key. + * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. + */ + get: handleUnaryCall< + GetAsymmetricSignatureKeyRequest, + AsymmetricSignatureKey + >; + /** Returns the list of asymmetric KMS keys in the specified folder. */ + list: handleUnaryCall< + ListAsymmetricSignatureKeysRequest, + ListAsymmetricSignatureKeysResponse + >; + /** Updates the specified asymmetric KMS key. */ + update: handleUnaryCall; + /** + * Deletes the specified asymmetric KMS key. This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. + * + * The key and its versions appear absent in [AsymmetricSignatureKeyService.Get] and [AsymmetricSignatureKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. + */ + delete: handleUnaryCall; + /** Lists operations for the specified asymmetric KMS key. */ + listOperations: handleUnaryCall< + ListAsymmetricSignatureKeyOperationsRequest, + ListAsymmetricSignatureKeyOperationsResponse + >; + /** Lists existing access bindings for the specified key. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for the key. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for the specified key. */ + updateAccessBindings: handleUnaryCall; +} + +export interface AsymmetricSignatureKeyServiceClient extends Client { + /** + * control plane + * Creates an asymmetric KMS key in the specified folder. 
+ */ + create( + request: CreateAsymmetricSignatureKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAsymmetricSignatureKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAsymmetricSignatureKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Returns the specified asymmetric KMS key. + * + * To get the list of available asymmetric KMS keys, make a [SymmetricKeyService.List] request. + */ + get( + request: GetAsymmetricSignatureKeyRequest, + callback: ( + error: ServiceError | null, + response: AsymmetricSignatureKey + ) => void + ): ClientUnaryCall; + get( + request: GetAsymmetricSignatureKeyRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: AsymmetricSignatureKey + ) => void + ): ClientUnaryCall; + get( + request: GetAsymmetricSignatureKeyRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: AsymmetricSignatureKey + ) => void + ): ClientUnaryCall; + /** Returns the list of asymmetric KMS keys in the specified folder. */ + list( + request: ListAsymmetricSignatureKeysRequest, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListAsymmetricSignatureKeysRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListAsymmetricSignatureKeysRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeysResponse + ) => void + ): ClientUnaryCall; + /** Updates the specified asymmetric KMS key. 
*/ + update( + request: UpdateAsymmetricSignatureKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateAsymmetricSignatureKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateAsymmetricSignatureKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Deletes the specified asymmetric KMS key. This action also automatically schedules + * the destruction of all of the key's versions in 72 hours. + * + * The key and its versions appear absent in [AsymmetricSignatureKeyService.Get] and [AsymmetricSignatureKeyService.List] + * requests, but can be restored within 72 hours with a request to tech support. + */ + delete( + request: DeleteAsymmetricSignatureKeyRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAsymmetricSignatureKeyRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAsymmetricSignatureKeyRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified asymmetric KMS key. 
*/ + listOperations( + request: ListAsymmetricSignatureKeyOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeyOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListAsymmetricSignatureKeyOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeyOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListAsymmetricSignatureKeyOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAsymmetricSignatureKeyOperationsResponse + ) => void + ): ClientUnaryCall; + /** Lists existing access bindings for the specified key. */ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the key. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the specified key. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const AsymmetricSignatureKeyServiceClient = makeGenericClientConstructor( + AsymmetricSignatureKeyServiceService, + "yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AsymmetricSignatureKeyServiceClient; + service: typeof AsymmetricSignatureKeyServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer.ts b/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer.ts index dd740eba..37408a2b 100644 --- a/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer.ts +++ b/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer.ts @@ -78,6 +78,8 @@ export interface NetworkLoadBalancer { listeners: Listener[]; /** List of target groups attached to the network load balancer. */ attachedTargetGroups: AttachedTargetGroup[]; + /** Specifies if network load balancer protected from deletion. */ + deletionProtection: boolean; } export enum NetworkLoadBalancer_Status { @@ -163,7 +165,6 @@ export function networkLoadBalancer_StatusToJSON( } } -/** Type of the load balancer. Only external load balancers are currently available. */ export enum NetworkLoadBalancer_Type { TYPE_UNSPECIFIED = 0, /** EXTERNAL - External network load balancer. 
*/ @@ -408,6 +409,7 @@ const baseNetworkLoadBalancer: object = { status: 0, type: 0, sessionAffinity: 0, + deletionProtection: false, }; export const NetworkLoadBalancer = { @@ -463,6 +465,9 @@ export const NetworkLoadBalancer = { for (const v of message.attachedTargetGroups) { AttachedTargetGroup.encode(v!, writer.uint32(106).fork()).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(112).bool(message.deletionProtection); + } return writer; }, @@ -522,6 +527,9 @@ export const NetworkLoadBalancer = { AttachedTargetGroup.decode(reader, reader.uint32()) ); break; + case 14: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -578,6 +586,11 @@ export const NetworkLoadBalancer = { message.attachedTargetGroups = (object.attachedTargetGroups ?? []).map( (e: any) => AttachedTargetGroup.fromJSON(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -619,6 +632,8 @@ export const NetworkLoadBalancer = { } else { obj.attachedTargetGroups = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -649,6 +664,7 @@ export const NetworkLoadBalancer = { object.attachedTargetGroups?.map((e) => AttachedTargetGroup.fromPartial(e) ) || []; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer_service.ts b/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer_service.ts index cb1215d3..6ea4d29c 100644 --- a/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer_service.ts +++ b/src/generated/yandex/cloud/loadbalancer/v1/network_load_balancer_service.ts @@ -105,12 +105,14 @@ export interface CreateNetworkLoadBalancerRequest { labels: { [key: string]: string }; /** ID of the region where the network load balancer resides. */ regionId: string; - /** Type of the network load balancer. Only external network load balancers are currently available. */ + /** Type of the network load balancer. */ type: NetworkLoadBalancer_Type; /** List of listeners and their specs for the network load balancer. */ listenerSpecs: ListenerSpec[]; /** List of attached target groups for the network load balancer. */ attachedTargetGroups: AttachedTargetGroup[]; + /** Specifies if network load balancer protected from deletion. */ + deletionProtection: boolean; } export interface CreateNetworkLoadBalancerRequest_LabelsEntry { @@ -151,6 +153,8 @@ export interface UpdateNetworkLoadBalancerRequest { listenerSpecs: ListenerSpec[]; /** A list of attached target groups for the network load balancer. */ attachedTargetGroups: AttachedTargetGroup[]; + /** Specifies if network load balancer protected from deletion. 
*/ + deletionProtection: boolean; } export interface UpdateNetworkLoadBalancerRequest_LabelsEntry { @@ -671,6 +675,7 @@ const baseCreateNetworkLoadBalancerRequest: object = { description: "", regionId: "", type: 0, + deletionProtection: false, }; export const CreateNetworkLoadBalancerRequest = { @@ -713,6 +718,9 @@ export const CreateNetworkLoadBalancerRequest = { for (const v of message.attachedTargetGroups) { AttachedTargetGroup.encode(v!, writer.uint32(66).fork()).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(72).bool(message.deletionProtection); + } return writer; }, @@ -765,6 +773,9 @@ export const CreateNetworkLoadBalancerRequest = { AttachedTargetGroup.decode(reader, reader.uint32()) ); break; + case 9: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -809,6 +820,11 @@ export const CreateNetworkLoadBalancerRequest = { message.attachedTargetGroups = (object.attachedTargetGroups ?? []).map( (e: any) => AttachedTargetGroup.fromJSON(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -841,6 +857,8 @@ export const CreateNetworkLoadBalancerRequest = { } else { obj.attachedTargetGroups = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -869,6 +887,7 @@ export const CreateNetworkLoadBalancerRequest = { object.attachedTargetGroups?.map((e) => AttachedTargetGroup.fromPartial(e) ) || []; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; @@ -1050,6 +1069,7 @@ const baseUpdateNetworkLoadBalancerRequest: object = { networkLoadBalancerId: "", name: "", description: "", + deletionProtection: false, }; export const UpdateNetworkLoadBalancerRequest = { @@ -1089,6 +1109,9 @@ export const UpdateNetworkLoadBalancerRequest = { for (const v of message.attachedTargetGroups) { AttachedTargetGroup.encode(v!, writer.uint32(58).fork()).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(64).bool(message.deletionProtection); + } return writer; }, @@ -1138,6 +1161,9 @@ export const UpdateNetworkLoadBalancerRequest = { AttachedTargetGroup.decode(reader, reader.uint32()) ); break; + case 8: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1179,6 +1205,11 @@ export const UpdateNetworkLoadBalancerRequest = { message.attachedTargetGroups = (object.attachedTargetGroups ?? []).map( (e: any) => AttachedTargetGroup.fromJSON(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -1213,6 +1244,8 @@ export const UpdateNetworkLoadBalancerRequest = { } else { obj.attachedTargetGroups = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -1243,6 +1276,7 @@ export const UpdateNetworkLoadBalancerRequest = { object.attachedTargetGroups?.map((e) => AttachedTargetGroup.fromPartial(e) ) || []; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/loadbalancer/v1/target_group.ts b/src/generated/yandex/cloud/loadbalancer/v1/target_group.ts index 8ca98768..3682209a 100644 --- a/src/generated/yandex/cloud/loadbalancer/v1/target_group.ts +++ b/src/generated/yandex/cloud/loadbalancer/v1/target_group.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.loadbalancer.v1"; -/** A TargetGroup resource. For more information, see [Target groups and resources](/docs/network-load-balancer/target-resources). */ +/** A TargetGroup resource. For more information, see [Target groups and resources](/docs/network-load-balancer/concepts/target-resources). */ export interface TargetGroup { $type: "yandex.cloud.loadbalancer.v1.TargetGroup"; /** Output only. ID of the target group. */ diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/agent.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/agent.ts new file mode 100644 index 00000000..87e0e105 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/agent.ts @@ -0,0 +1,103 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface AgentInstance { + $type: "yandex.cloud.loadtesting.agent.v1.AgentInstance"; + id: string; +} + +const baseAgentInstance: object = { + $type: "yandex.cloud.loadtesting.agent.v1.AgentInstance", + id: "", +}; + +export const AgentInstance = { + $type: "yandex.cloud.loadtesting.agent.v1.AgentInstance" as const, + + encode( + message: AgentInstance, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AgentInstance { + const reader = input 
instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAgentInstance } as AgentInstance; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AgentInstance { + const message = { ...baseAgentInstance } as AgentInstance; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + return message; + }, + + toJSON(message: AgentInstance): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + return obj; + }, + + fromPartial, I>>( + object: I + ): AgentInstance { + const message = { ...baseAgentInstance } as AgentInstance; + message.id = object.id ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AgentInstance.$type, AgentInstance); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/agent_registration_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/agent_registration_service.ts new file mode 100644 index 00000000..ad292646 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/agent_registration_service.ts @@ -0,0 +1,480 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface RegisterRequest { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterRequest"; + computeInstanceId: string; +} + +export interface RegisterResponse { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterResponse"; + agentInstanceId: string; +} + +export interface ExternalAgentRegisterRequest { + $type: "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterRequest"; + folderId: string; + computeInstanceId: string; + name: string; + agentVersion: string; +} + +export interface ExternalAgentRegisterMetadata { + $type: "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterMetadata"; + agentInstanceId: string; +} + +const baseRegisterRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterRequest", + computeInstanceId: "", +}; + +export const RegisterRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterRequest" as const, + + encode( + message: RegisterRequest, + writer: _m0.Writer = _m0.Writer.create() + 
): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RegisterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRegisterRequest } as RegisterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RegisterRequest { + const message = { ...baseRegisterRequest } as RegisterRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + return message; + }, + + toJSON(message: RegisterRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RegisterRequest { + const message = { ...baseRegisterRequest } as RegisterRequest; + message.computeInstanceId = object.computeInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RegisterRequest.$type, RegisterRequest); + +const baseRegisterResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterResponse", + agentInstanceId: "", +}; + +export const RegisterResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.RegisterResponse" as const, + + encode( + message: RegisterResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentInstanceId !== "") { + writer.uint32(10).string(message.agentInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RegisterResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRegisterResponse } as RegisterResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RegisterResponse { + const message = { ...baseRegisterResponse } as RegisterResponse; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + return message; + }, + + toJSON(message: RegisterResponse): unknown { + const obj: any = {}; + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RegisterResponse { + const message = { ...baseRegisterResponse } as RegisterResponse; + message.agentInstanceId = object.agentInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RegisterResponse.$type, RegisterResponse); + +const baseExternalAgentRegisterRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterRequest", + folderId: "", + computeInstanceId: "", + name: "", + agentVersion: "", +}; + +export const ExternalAgentRegisterRequest = { + $type: + "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterRequest" as const, + + encode( + message: ExternalAgentRegisterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.computeInstanceId !== "") { + writer.uint32(18).string(message.computeInstanceId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.agentVersion !== "") { + writer.uint32(34).string(message.agentVersion); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExternalAgentRegisterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseExternalAgentRegisterRequest, + } as ExternalAgentRegisterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.computeInstanceId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.agentVersion = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExternalAgentRegisterRequest { + const message = { + ...baseExternalAgentRegisterRequest, + } as ExternalAgentRegisterRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.agentVersion = + object.agentVersion !== undefined && object.agentVersion !== null + ? String(object.agentVersion) + : ""; + return message; + }, + + toJSON(message: ExternalAgentRegisterRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.name !== undefined && (obj.name = message.name); + message.agentVersion !== undefined && + (obj.agentVersion = message.agentVersion); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExternalAgentRegisterRequest { + const message = { + ...baseExternalAgentRegisterRequest, + } as ExternalAgentRegisterRequest; + message.folderId = object.folderId ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.name = object.name ?? ""; + message.agentVersion = object.agentVersion ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ExternalAgentRegisterRequest.$type, + ExternalAgentRegisterRequest +); + +const baseExternalAgentRegisterMetadata: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterMetadata", + agentInstanceId: "", +}; + +export const ExternalAgentRegisterMetadata = { + $type: + "yandex.cloud.loadtesting.agent.v1.ExternalAgentRegisterMetadata" as const, + + encode( + message: ExternalAgentRegisterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentInstanceId !== "") { + writer.uint32(10).string(message.agentInstanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExternalAgentRegisterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseExternalAgentRegisterMetadata, + } as ExternalAgentRegisterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExternalAgentRegisterMetadata { + const message = { + ...baseExternalAgentRegisterMetadata, + } as ExternalAgentRegisterMetadata; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + return message; + }, + + toJSON(message: ExternalAgentRegisterMetadata): unknown { + const obj: any = {}; + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExternalAgentRegisterMetadata { + const message = { + ...baseExternalAgentRegisterMetadata, + } as ExternalAgentRegisterMetadata; + message.agentInstanceId = object.agentInstanceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ExternalAgentRegisterMetadata.$type, + ExternalAgentRegisterMetadata +); + +export const AgentRegistrationServiceService = { + /** Registers specified agent. */ + register: { + path: "/yandex.cloud.loadtesting.agent.v1.AgentRegistrationService/Register", + requestStream: false, + responseStream: false, + requestSerialize: (value: RegisterRequest) => + Buffer.from(RegisterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RegisterRequest.decode(value), + responseSerialize: (value: RegisterResponse) => + Buffer.from(RegisterResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => RegisterResponse.decode(value), + }, + /** Registers external agent. */ + externalAgentRegister: { + path: "/yandex.cloud.loadtesting.agent.v1.AgentRegistrationService/ExternalAgentRegister", + requestStream: false, + responseStream: false, + requestSerialize: (value: ExternalAgentRegisterRequest) => + Buffer.from(ExternalAgentRegisterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ExternalAgentRegisterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface AgentRegistrationServiceServer + extends UntypedServiceImplementation { + /** Registers specified agent. */ + register: handleUnaryCall; + /** Registers external agent. */ + externalAgentRegister: handleUnaryCall< + ExternalAgentRegisterRequest, + Operation + >; +} + +export interface AgentRegistrationServiceClient extends Client { + /** Registers specified agent. 
*/ + register( + request: RegisterRequest, + callback: (error: ServiceError | null, response: RegisterResponse) => void + ): ClientUnaryCall; + register( + request: RegisterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: RegisterResponse) => void + ): ClientUnaryCall; + register( + request: RegisterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: RegisterResponse) => void + ): ClientUnaryCall; + /** Registers external agent. */ + externalAgentRegister( + request: ExternalAgentRegisterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + externalAgentRegister( + request: ExternalAgentRegisterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + externalAgentRegister( + request: ExternalAgentRegisterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const AgentRegistrationServiceClient = makeGenericClientConstructor( + AgentRegistrationServiceService, + "yandex.cloud.loadtesting.agent.v1.AgentRegistrationService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AgentRegistrationServiceClient; + service: typeof AgentRegistrationServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/agent_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/agent_service.ts new file mode 100644 index 00000000..ed7e2e83 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/agent_service.ts @@ -0,0 +1,370 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface ClaimAgentStatusRequest { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusRequest"; + agentInstanceId: string; + status: ClaimAgentStatusRequest_Status; +} + +export enum ClaimAgentStatusRequest_Status { + STATUS_UNSPECIFIED = 0, + READY_FOR_TEST = 1, + PREPARING_TEST = 2, + TESTING = 3, + TANK_FAILED = 4, + STOPPED = 5, + UPLOADING_ARTIFACTS = 6, + UNRECOGNIZED = -1, +} + +export function claimAgentStatusRequest_StatusFromJSON( + object: any +): ClaimAgentStatusRequest_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return ClaimAgentStatusRequest_Status.STATUS_UNSPECIFIED; + case 1: + case "READY_FOR_TEST": + return ClaimAgentStatusRequest_Status.READY_FOR_TEST; + case 2: + case "PREPARING_TEST": + return ClaimAgentStatusRequest_Status.PREPARING_TEST; + case 3: + case "TESTING": + return ClaimAgentStatusRequest_Status.TESTING; + case 4: + case "TANK_FAILED": + return ClaimAgentStatusRequest_Status.TANK_FAILED; + case 5: + case "STOPPED": + return ClaimAgentStatusRequest_Status.STOPPED; + case 6: + case 
"UPLOADING_ARTIFACTS": + return ClaimAgentStatusRequest_Status.UPLOADING_ARTIFACTS; + case -1: + case "UNRECOGNIZED": + default: + return ClaimAgentStatusRequest_Status.UNRECOGNIZED; + } +} + +export function claimAgentStatusRequest_StatusToJSON( + object: ClaimAgentStatusRequest_Status +): string { + switch (object) { + case ClaimAgentStatusRequest_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case ClaimAgentStatusRequest_Status.READY_FOR_TEST: + return "READY_FOR_TEST"; + case ClaimAgentStatusRequest_Status.PREPARING_TEST: + return "PREPARING_TEST"; + case ClaimAgentStatusRequest_Status.TESTING: + return "TESTING"; + case ClaimAgentStatusRequest_Status.TANK_FAILED: + return "TANK_FAILED"; + case ClaimAgentStatusRequest_Status.STOPPED: + return "STOPPED"; + case ClaimAgentStatusRequest_Status.UPLOADING_ARTIFACTS: + return "UPLOADING_ARTIFACTS"; + default: + return "UNKNOWN"; + } +} + +export interface ClaimAgentStatusResponse { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusResponse"; + code: number; +} + +const baseClaimAgentStatusRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusRequest", + agentInstanceId: "", + status: 0, +}; + +export const ClaimAgentStatusRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusRequest" as const, + + encode( + message: ClaimAgentStatusRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentInstanceId !== "") { + writer.uint32(10).string(message.agentInstanceId); + } + if (message.status !== 0) { + writer.uint32(16).int32(message.status); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClaimAgentStatusRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseClaimAgentStatusRequest, + } as ClaimAgentStatusRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentInstanceId = reader.string(); + break; + case 2: + message.status = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClaimAgentStatusRequest { + const message = { + ...baseClaimAgentStatusRequest, + } as ClaimAgentStatusRequest; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? claimAgentStatusRequest_StatusFromJSON(object.status) + : 0; + return message; + }, + + toJSON(message: ClaimAgentStatusRequest): unknown { + const obj: any = {}; + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + message.status !== undefined && + (obj.status = claimAgentStatusRequest_StatusToJSON(message.status)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClaimAgentStatusRequest { + const message = { + ...baseClaimAgentStatusRequest, + } as ClaimAgentStatusRequest; + message.agentInstanceId = object.agentInstanceId ?? ""; + message.status = object.status ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ClaimAgentStatusRequest.$type, ClaimAgentStatusRequest); + +const baseClaimAgentStatusResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusResponse", + code: 0, +}; + +export const ClaimAgentStatusResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimAgentStatusResponse" as const, + + encode( + message: ClaimAgentStatusResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.code !== 0) { + writer.uint32(8).int64(message.code); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClaimAgentStatusResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseClaimAgentStatusResponse, + } as ClaimAgentStatusResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.code = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClaimAgentStatusResponse { + const message = { + ...baseClaimAgentStatusResponse, + } as ClaimAgentStatusResponse; + message.code = + object.code !== undefined && object.code !== null + ? Number(object.code) + : 0; + return message; + }, + + toJSON(message: ClaimAgentStatusResponse): unknown { + const obj: any = {}; + message.code !== undefined && (obj.code = Math.round(message.code)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClaimAgentStatusResponse { + const message = { + ...baseClaimAgentStatusResponse, + } as ClaimAgentStatusResponse; + message.code = object.code ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + ClaimAgentStatusResponse.$type, + ClaimAgentStatusResponse +); + +export const AgentServiceService = { + /** Claims status for the specified agent. 
*/ + claimStatus: { + path: "/yandex.cloud.loadtesting.agent.v1.AgentService/ClaimStatus", + requestStream: false, + responseStream: false, + requestSerialize: (value: ClaimAgentStatusRequest) => + Buffer.from(ClaimAgentStatusRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ClaimAgentStatusRequest.decode(value), + responseSerialize: (value: ClaimAgentStatusResponse) => + Buffer.from(ClaimAgentStatusResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ClaimAgentStatusResponse.decode(value), + }, +} as const; + +export interface AgentServiceServer extends UntypedServiceImplementation { + /** Claims status for the specified agent. */ + claimStatus: handleUnaryCall< + ClaimAgentStatusRequest, + ClaimAgentStatusResponse + >; +} + +export interface AgentServiceClient extends Client { + /** Claims status for the specified agent. */ + claimStatus( + request: ClaimAgentStatusRequest, + callback: ( + error: ServiceError | null, + response: ClaimAgentStatusResponse + ) => void + ): ClientUnaryCall; + claimStatus( + request: ClaimAgentStatusRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ClaimAgentStatusResponse + ) => void + ): ClientUnaryCall; + claimStatus( + request: ClaimAgentStatusRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ClaimAgentStatusResponse + ) => void + ): ClientUnaryCall; +} + +export const AgentServiceClient = makeGenericClientConstructor( + AgentServiceService, + "yandex.cloud.loadtesting.agent.v1.AgentService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AgentServiceClient; + service: typeof AgentServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== 
"undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/job_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/job_service.ts new file mode 100644 index 00000000..2e6a0d95 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/job_service.ts @@ -0,0 +1,1484 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface Job { + $type: "yandex.cloud.loadtesting.agent.v1.Job"; + id: string; + config: string; + ammo?: File; + loggingLogGroupId: string; + testData?: StorageObject; + dataPayload: TestDataEntry[]; + artifactUploadSettings?: TestArtifactUploadSettings; +} + +export interface File { + $type: 
"yandex.cloud.loadtesting.agent.v1.File"; + name: string; + content: Buffer; +} + +export interface StorageObject { + $type: "yandex.cloud.loadtesting.agent.v1.StorageObject"; + objectStorageBucket: string; + objectStorageFilename: string; +} + +export interface TestDataEntry { + $type: "yandex.cloud.loadtesting.agent.v1.TestDataEntry"; + name: string; + isTransient: boolean; + storageObject?: StorageObject; +} + +export interface TestArtifactUploadSettings { + $type: "yandex.cloud.loadtesting.agent.v1.TestArtifactUploadSettings"; + outputBucket: string; + outputName: string; + isArchive: boolean; + filterInclude: string[]; + filterExclude: string[]; +} + +export interface GetJobTransientFile { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobTransientFile"; + jobId: string; + name: string; +} + +export interface GetJobRequest { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobRequest"; + computeInstanceId: string; + agentInstanceId: string; + jobId: string; +} + +export interface ClaimJobStatusRequest { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusRequest"; + jobId: string; + status: ClaimJobStatusRequest_JobStatus; + error: string; +} + +export enum ClaimJobStatusRequest_JobStatus { + JOB_STATUS_UNSPECIFIED = 0, + POST_PROCESS = 1, + INITIATED = 2, + PREPARING = 3, + NOT_FOUND = 4, + RUNNING = 5, + FINISHING = 6, + FINISHED = 7, + STOPPED = 8, + FAILED = 9, + AUTOSTOPPED = 10, + WAITING_FOR_A_COMMAND_TO_RUN = 11, + UNRECOGNIZED = -1, +} + +export function claimJobStatusRequest_JobStatusFromJSON( + object: any +): ClaimJobStatusRequest_JobStatus { + switch (object) { + case 0: + case "JOB_STATUS_UNSPECIFIED": + return ClaimJobStatusRequest_JobStatus.JOB_STATUS_UNSPECIFIED; + case 1: + case "POST_PROCESS": + return ClaimJobStatusRequest_JobStatus.POST_PROCESS; + case 2: + case "INITIATED": + return ClaimJobStatusRequest_JobStatus.INITIATED; + case 3: + case "PREPARING": + return ClaimJobStatusRequest_JobStatus.PREPARING; + case 4: + case "NOT_FOUND": 
+ return ClaimJobStatusRequest_JobStatus.NOT_FOUND; + case 5: + case "RUNNING": + return ClaimJobStatusRequest_JobStatus.RUNNING; + case 6: + case "FINISHING": + return ClaimJobStatusRequest_JobStatus.FINISHING; + case 7: + case "FINISHED": + return ClaimJobStatusRequest_JobStatus.FINISHED; + case 8: + case "STOPPED": + return ClaimJobStatusRequest_JobStatus.STOPPED; + case 9: + case "FAILED": + return ClaimJobStatusRequest_JobStatus.FAILED; + case 10: + case "AUTOSTOPPED": + return ClaimJobStatusRequest_JobStatus.AUTOSTOPPED; + case 11: + case "WAITING_FOR_A_COMMAND_TO_RUN": + return ClaimJobStatusRequest_JobStatus.WAITING_FOR_A_COMMAND_TO_RUN; + case -1: + case "UNRECOGNIZED": + default: + return ClaimJobStatusRequest_JobStatus.UNRECOGNIZED; + } +} + +export function claimJobStatusRequest_JobStatusToJSON( + object: ClaimJobStatusRequest_JobStatus +): string { + switch (object) { + case ClaimJobStatusRequest_JobStatus.JOB_STATUS_UNSPECIFIED: + return "JOB_STATUS_UNSPECIFIED"; + case ClaimJobStatusRequest_JobStatus.POST_PROCESS: + return "POST_PROCESS"; + case ClaimJobStatusRequest_JobStatus.INITIATED: + return "INITIATED"; + case ClaimJobStatusRequest_JobStatus.PREPARING: + return "PREPARING"; + case ClaimJobStatusRequest_JobStatus.NOT_FOUND: + return "NOT_FOUND"; + case ClaimJobStatusRequest_JobStatus.RUNNING: + return "RUNNING"; + case ClaimJobStatusRequest_JobStatus.FINISHING: + return "FINISHING"; + case ClaimJobStatusRequest_JobStatus.FINISHED: + return "FINISHED"; + case ClaimJobStatusRequest_JobStatus.STOPPED: + return "STOPPED"; + case ClaimJobStatusRequest_JobStatus.FAILED: + return "FAILED"; + case ClaimJobStatusRequest_JobStatus.AUTOSTOPPED: + return "AUTOSTOPPED"; + case ClaimJobStatusRequest_JobStatus.WAITING_FOR_A_COMMAND_TO_RUN: + return "WAITING_FOR_A_COMMAND_TO_RUN"; + default: + return "UNKNOWN"; + } +} + +export interface ClaimJobStatusResponse { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusResponse"; + code: number; +} + +export 
interface JobSignalRequest { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalRequest"; + jobId: string; +} + +export interface JobSignalResponse { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalResponse"; + signal: JobSignalResponse_Signal; + /** seconds */ + waitDuration: number; + /** seconds */ + runIn: number; +} + +export enum JobSignalResponse_Signal { + SIGNAL_UNSPECIFIED = 0, + STOP = 1, + WAIT = 2, + RUN_IN = 3, + UNRECOGNIZED = -1, +} + +export function jobSignalResponse_SignalFromJSON( + object: any +): JobSignalResponse_Signal { + switch (object) { + case 0: + case "SIGNAL_UNSPECIFIED": + return JobSignalResponse_Signal.SIGNAL_UNSPECIFIED; + case 1: + case "STOP": + return JobSignalResponse_Signal.STOP; + case 2: + case "WAIT": + return JobSignalResponse_Signal.WAIT; + case 3: + case "RUN_IN": + return JobSignalResponse_Signal.RUN_IN; + case -1: + case "UNRECOGNIZED": + default: + return JobSignalResponse_Signal.UNRECOGNIZED; + } +} + +export function jobSignalResponse_SignalToJSON( + object: JobSignalResponse_Signal +): string { + switch (object) { + case JobSignalResponse_Signal.SIGNAL_UNSPECIFIED: + return "SIGNAL_UNSPECIFIED"; + case JobSignalResponse_Signal.STOP: + return "STOP"; + case JobSignalResponse_Signal.WAIT: + return "WAIT"; + case JobSignalResponse_Signal.RUN_IN: + return "RUN_IN"; + default: + return "UNKNOWN"; + } +} + +const baseJob: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Job", + id: "", + config: "", + loggingLogGroupId: "", +}; + +export const Job = { + $type: "yandex.cloud.loadtesting.agent.v1.Job" as const, + + encode(message: Job, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.config !== "") { + writer.uint32(18).string(message.config); + } + if (message.ammo !== undefined) { + File.encode(message.ammo, writer.uint32(26).fork()).ldelim(); + } + if (message.loggingLogGroupId !== "") { + 
writer.uint32(34).string(message.loggingLogGroupId); + } + if (message.testData !== undefined) { + StorageObject.encode(message.testData, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.dataPayload) { + TestDataEntry.encode(v!, writer.uint32(50).fork()).ldelim(); + } + if (message.artifactUploadSettings !== undefined) { + TestArtifactUploadSettings.encode( + message.artifactUploadSettings, + writer.uint32(58).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Job { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseJob } as Job; + message.dataPayload = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.config = reader.string(); + break; + case 3: + message.ammo = File.decode(reader, reader.uint32()); + break; + case 4: + message.loggingLogGroupId = reader.string(); + break; + case 5: + message.testData = StorageObject.decode(reader, reader.uint32()); + break; + case 6: + message.dataPayload.push( + TestDataEntry.decode(reader, reader.uint32()) + ); + break; + case 7: + message.artifactUploadSettings = TestArtifactUploadSettings.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Job { + const message = { ...baseJob } as Job; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.config = + object.config !== undefined && object.config !== null + ? String(object.config) + : ""; + message.ammo = + object.ammo !== undefined && object.ammo !== null + ? File.fromJSON(object.ammo) + : undefined; + message.loggingLogGroupId = + object.loggingLogGroupId !== undefined && + object.loggingLogGroupId !== null + ? 
String(object.loggingLogGroupId) + : ""; + message.testData = + object.testData !== undefined && object.testData !== null + ? StorageObject.fromJSON(object.testData) + : undefined; + message.dataPayload = (object.dataPayload ?? []).map((e: any) => + TestDataEntry.fromJSON(e) + ); + message.artifactUploadSettings = + object.artifactUploadSettings !== undefined && + object.artifactUploadSettings !== null + ? TestArtifactUploadSettings.fromJSON(object.artifactUploadSettings) + : undefined; + return message; + }, + + toJSON(message: Job): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.config !== undefined && (obj.config = message.config); + message.ammo !== undefined && + (obj.ammo = message.ammo ? File.toJSON(message.ammo) : undefined); + message.loggingLogGroupId !== undefined && + (obj.loggingLogGroupId = message.loggingLogGroupId); + message.testData !== undefined && + (obj.testData = message.testData + ? StorageObject.toJSON(message.testData) + : undefined); + if (message.dataPayload) { + obj.dataPayload = message.dataPayload.map((e) => + e ? TestDataEntry.toJSON(e) : undefined + ); + } else { + obj.dataPayload = []; + } + message.artifactUploadSettings !== undefined && + (obj.artifactUploadSettings = message.artifactUploadSettings + ? TestArtifactUploadSettings.toJSON(message.artifactUploadSettings) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Job { + const message = { ...baseJob } as Job; + message.id = object.id ?? ""; + message.config = object.config ?? ""; + message.ammo = + object.ammo !== undefined && object.ammo !== null + ? File.fromPartial(object.ammo) + : undefined; + message.loggingLogGroupId = object.loggingLogGroupId ?? ""; + message.testData = + object.testData !== undefined && object.testData !== null + ? 
StorageObject.fromPartial(object.testData) + : undefined; + message.dataPayload = + object.dataPayload?.map((e) => TestDataEntry.fromPartial(e)) || []; + message.artifactUploadSettings = + object.artifactUploadSettings !== undefined && + object.artifactUploadSettings !== null + ? TestArtifactUploadSettings.fromPartial(object.artifactUploadSettings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Job.$type, Job); + +const baseFile: object = { + $type: "yandex.cloud.loadtesting.agent.v1.File", + name: "", +}; + +export const File = { + $type: "yandex.cloud.loadtesting.agent.v1.File" as const, + + encode(message: File, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.content.length !== 0) { + writer.uint32(18).bytes(message.content); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): File { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFile } as File; + message.content = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.content = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): File { + const message = { ...baseFile } as File; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.content = + object.content !== undefined && object.content !== null + ? 
Buffer.from(bytesFromBase64(object.content)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: File): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.content !== undefined && + (obj.content = base64FromBytes( + message.content !== undefined ? message.content : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>(object: I): File { + const message = { ...baseFile } as File; + message.name = object.name ?? ""; + message.content = object.content ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set(File.$type, File); + +const baseStorageObject: object = { + $type: "yandex.cloud.loadtesting.agent.v1.StorageObject", + objectStorageBucket: "", + objectStorageFilename: "", +}; + +export const StorageObject = { + $type: "yandex.cloud.loadtesting.agent.v1.StorageObject" as const, + + encode( + message: StorageObject, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.objectStorageBucket !== "") { + writer.uint32(10).string(message.objectStorageBucket); + } + if (message.objectStorageFilename !== "") { + writer.uint32(18).string(message.objectStorageFilename); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StorageObject { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseStorageObject } as StorageObject; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.objectStorageBucket = reader.string(); + break; + case 2: + message.objectStorageFilename = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StorageObject { + const message = { ...baseStorageObject } as StorageObject; + message.objectStorageBucket = + object.objectStorageBucket !== undefined && + object.objectStorageBucket !== null + ? String(object.objectStorageBucket) + : ""; + message.objectStorageFilename = + object.objectStorageFilename !== undefined && + object.objectStorageFilename !== null + ? String(object.objectStorageFilename) + : ""; + return message; + }, + + toJSON(message: StorageObject): unknown { + const obj: any = {}; + message.objectStorageBucket !== undefined && + (obj.objectStorageBucket = message.objectStorageBucket); + message.objectStorageFilename !== undefined && + (obj.objectStorageFilename = message.objectStorageFilename); + return obj; + }, + + fromPartial, I>>( + object: I + ): StorageObject { + const message = { ...baseStorageObject } as StorageObject; + message.objectStorageBucket = object.objectStorageBucket ?? ""; + message.objectStorageFilename = object.objectStorageFilename ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StorageObject.$type, StorageObject); + +const baseTestDataEntry: object = { + $type: "yandex.cloud.loadtesting.agent.v1.TestDataEntry", + name: "", + isTransient: false, +}; + +export const TestDataEntry = { + $type: "yandex.cloud.loadtesting.agent.v1.TestDataEntry" as const, + + encode( + message: TestDataEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.isTransient === true) { + writer.uint32(16).bool(message.isTransient); + } + if (message.storageObject !== undefined) { + StorageObject.encode( + message.storageObject, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TestDataEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTestDataEntry } as TestDataEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.isTransient = reader.bool(); + break; + case 3: + message.storageObject = StorageObject.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TestDataEntry { + const message = { ...baseTestDataEntry } as TestDataEntry; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.isTransient = + object.isTransient !== undefined && object.isTransient !== null + ? Boolean(object.isTransient) + : false; + message.storageObject = + object.storageObject !== undefined && object.storageObject !== null + ? 
StorageObject.fromJSON(object.storageObject) + : undefined; + return message; + }, + + toJSON(message: TestDataEntry): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.isTransient !== undefined && + (obj.isTransient = message.isTransient); + message.storageObject !== undefined && + (obj.storageObject = message.storageObject + ? StorageObject.toJSON(message.storageObject) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): TestDataEntry { + const message = { ...baseTestDataEntry } as TestDataEntry; + message.name = object.name ?? ""; + message.isTransient = object.isTransient ?? false; + message.storageObject = + object.storageObject !== undefined && object.storageObject !== null + ? StorageObject.fromPartial(object.storageObject) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(TestDataEntry.$type, TestDataEntry); + +const baseTestArtifactUploadSettings: object = { + $type: "yandex.cloud.loadtesting.agent.v1.TestArtifactUploadSettings", + outputBucket: "", + outputName: "", + isArchive: false, + filterInclude: "", + filterExclude: "", +}; + +export const TestArtifactUploadSettings = { + $type: + "yandex.cloud.loadtesting.agent.v1.TestArtifactUploadSettings" as const, + + encode( + message: TestArtifactUploadSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.outputBucket !== "") { + writer.uint32(10).string(message.outputBucket); + } + if (message.outputName !== "") { + writer.uint32(18).string(message.outputName); + } + if (message.isArchive === true) { + writer.uint32(24).bool(message.isArchive); + } + for (const v of message.filterInclude) { + writer.uint32(34).string(v!); + } + for (const v of message.filterExclude) { + writer.uint32(42).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): TestArtifactUploadSettings { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseTestArtifactUploadSettings, + } as TestArtifactUploadSettings; + message.filterInclude = []; + message.filterExclude = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.outputBucket = reader.string(); + break; + case 2: + message.outputName = reader.string(); + break; + case 3: + message.isArchive = reader.bool(); + break; + case 4: + message.filterInclude.push(reader.string()); + break; + case 5: + message.filterExclude.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TestArtifactUploadSettings { + const message = { + ...baseTestArtifactUploadSettings, + } as TestArtifactUploadSettings; + message.outputBucket = + object.outputBucket !== undefined && object.outputBucket !== null + ? String(object.outputBucket) + : ""; + message.outputName = + object.outputName !== undefined && object.outputName !== null + ? String(object.outputName) + : ""; + message.isArchive = + object.isArchive !== undefined && object.isArchive !== null + ? Boolean(object.isArchive) + : false; + message.filterInclude = (object.filterInclude ?? []).map((e: any) => + String(e) + ); + message.filterExclude = (object.filterExclude ?? 
[]).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: TestArtifactUploadSettings): unknown { + const obj: any = {}; + message.outputBucket !== undefined && + (obj.outputBucket = message.outputBucket); + message.outputName !== undefined && (obj.outputName = message.outputName); + message.isArchive !== undefined && (obj.isArchive = message.isArchive); + if (message.filterInclude) { + obj.filterInclude = message.filterInclude.map((e) => e); + } else { + obj.filterInclude = []; + } + if (message.filterExclude) { + obj.filterExclude = message.filterExclude.map((e) => e); + } else { + obj.filterExclude = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): TestArtifactUploadSettings { + const message = { + ...baseTestArtifactUploadSettings, + } as TestArtifactUploadSettings; + message.outputBucket = object.outputBucket ?? ""; + message.outputName = object.outputName ?? ""; + message.isArchive = object.isArchive ?? false; + message.filterInclude = object.filterInclude?.map((e) => e) || []; + message.filterExclude = object.filterExclude?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + TestArtifactUploadSettings.$type, + TestArtifactUploadSettings +); + +const baseGetJobTransientFile: object = { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobTransientFile", + jobId: "", + name: "", +}; + +export const GetJobTransientFile = { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobTransientFile" as const, + + encode( + message: GetJobTransientFile, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.jobId !== "") { + writer.uint32(10).string(message.jobId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetJobTransientFile { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetJobTransientFile } as GetJobTransientFile; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.jobId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetJobTransientFile { + const message = { ...baseGetJobTransientFile } as GetJobTransientFile; + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? String(object.jobId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: GetJobTransientFile): unknown { + const obj: any = {}; + message.jobId !== undefined && (obj.jobId = message.jobId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetJobTransientFile { + const message = { ...baseGetJobTransientFile } as GetJobTransientFile; + message.jobId = object.jobId ?? ""; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetJobTransientFile.$type, GetJobTransientFile); + +const baseGetJobRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobRequest", + computeInstanceId: "", + agentInstanceId: "", + jobId: "", +}; + +export const GetJobRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.GetJobRequest" as const, + + encode( + message: GetJobRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.agentInstanceId !== "") { + writer.uint32(18).string(message.agentInstanceId); + } + if (message.jobId !== "") { + writer.uint32(26).string(message.jobId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetJobRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetJobRequest } as GetJobRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.agentInstanceId = reader.string(); + break; + case 3: + message.jobId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetJobRequest { + const message = { ...baseGetJobRequest } as GetJobRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? 
String(object.jobId) + : ""; + return message; + }, + + toJSON(message: GetJobRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + message.jobId !== undefined && (obj.jobId = message.jobId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetJobRequest { + const message = { ...baseGetJobRequest } as GetJobRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.agentInstanceId = object.agentInstanceId ?? ""; + message.jobId = object.jobId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetJobRequest.$type, GetJobRequest); + +const baseClaimJobStatusRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusRequest", + jobId: "", + status: 0, + error: "", +}; + +export const ClaimJobStatusRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusRequest" as const, + + encode( + message: ClaimJobStatusRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.jobId !== "") { + writer.uint32(10).string(message.jobId); + } + if (message.status !== 0) { + writer.uint32(16).int32(message.status); + } + if (message.error !== "") { + writer.uint32(26).string(message.error); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClaimJobStatusRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseClaimJobStatusRequest } as ClaimJobStatusRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.jobId = reader.string(); + break; + case 2: + message.status = reader.int32() as any; + break; + case 3: + message.error = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClaimJobStatusRequest { + const message = { ...baseClaimJobStatusRequest } as ClaimJobStatusRequest; + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? String(object.jobId) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? claimJobStatusRequest_JobStatusFromJSON(object.status) + : 0; + message.error = + object.error !== undefined && object.error !== null + ? String(object.error) + : ""; + return message; + }, + + toJSON(message: ClaimJobStatusRequest): unknown { + const obj: any = {}; + message.jobId !== undefined && (obj.jobId = message.jobId); + message.status !== undefined && + (obj.status = claimJobStatusRequest_JobStatusToJSON(message.status)); + message.error !== undefined && (obj.error = message.error); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClaimJobStatusRequest { + const message = { ...baseClaimJobStatusRequest } as ClaimJobStatusRequest; + message.jobId = object.jobId ?? ""; + message.status = object.status ?? 0; + message.error = object.error ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ClaimJobStatusRequest.$type, ClaimJobStatusRequest); + +const baseClaimJobStatusResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusResponse", + code: 0, +}; + +export const ClaimJobStatusResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.ClaimJobStatusResponse" as const, + + encode( + message: ClaimJobStatusResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.code !== 0) { + writer.uint32(8).int64(message.code); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClaimJobStatusResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClaimJobStatusResponse } as ClaimJobStatusResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.code = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClaimJobStatusResponse { + const message = { ...baseClaimJobStatusResponse } as ClaimJobStatusResponse; + message.code = + object.code !== undefined && object.code !== null + ? Number(object.code) + : 0; + return message; + }, + + toJSON(message: ClaimJobStatusResponse): unknown { + const obj: any = {}; + message.code !== undefined && (obj.code = Math.round(message.code)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClaimJobStatusResponse { + const message = { ...baseClaimJobStatusResponse } as ClaimJobStatusResponse; + message.code = object.code ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ClaimJobStatusResponse.$type, ClaimJobStatusResponse); + +const baseJobSignalRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalRequest", + jobId: "", +}; + +export const JobSignalRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalRequest" as const, + + encode( + message: JobSignalRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.jobId !== "") { + writer.uint32(10).string(message.jobId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): JobSignalRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseJobSignalRequest } as JobSignalRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.jobId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): JobSignalRequest { + const message = { ...baseJobSignalRequest } as JobSignalRequest; + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? String(object.jobId) + : ""; + return message; + }, + + toJSON(message: JobSignalRequest): unknown { + const obj: any = {}; + message.jobId !== undefined && (obj.jobId = message.jobId); + return obj; + }, + + fromPartial, I>>( + object: I + ): JobSignalRequest { + const message = { ...baseJobSignalRequest } as JobSignalRequest; + message.jobId = object.jobId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(JobSignalRequest.$type, JobSignalRequest); + +const baseJobSignalResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalResponse", + signal: 0, + waitDuration: 0, + runIn: 0, +}; + +export const JobSignalResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.JobSignalResponse" as const, + + encode( + message: JobSignalResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.signal !== 0) { + writer.uint32(8).int32(message.signal); + } + if (message.waitDuration !== 0) { + writer.uint32(17).double(message.waitDuration); + } + if (message.runIn !== 0) { + writer.uint32(25).double(message.runIn); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): JobSignalResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseJobSignalResponse } as JobSignalResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signal = reader.int32() as any; + break; + case 2: + message.waitDuration = reader.double(); + break; + case 3: + message.runIn = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): JobSignalResponse { + const message = { ...baseJobSignalResponse } as JobSignalResponse; + message.signal = + object.signal !== undefined && object.signal !== null + ? jobSignalResponse_SignalFromJSON(object.signal) + : 0; + message.waitDuration = + object.waitDuration !== undefined && object.waitDuration !== null + ? Number(object.waitDuration) + : 0; + message.runIn = + object.runIn !== undefined && object.runIn !== null + ? 
Number(object.runIn) + : 0; + return message; + }, + + toJSON(message: JobSignalResponse): unknown { + const obj: any = {}; + message.signal !== undefined && + (obj.signal = jobSignalResponse_SignalToJSON(message.signal)); + message.waitDuration !== undefined && + (obj.waitDuration = message.waitDuration); + message.runIn !== undefined && (obj.runIn = message.runIn); + return obj; + }, + + fromPartial, I>>( + object: I + ): JobSignalResponse { + const message = { ...baseJobSignalResponse } as JobSignalResponse; + message.signal = object.signal ?? 0; + message.waitDuration = object.waitDuration ?? 0; + message.runIn = object.runIn ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(JobSignalResponse.$type, JobSignalResponse); + +export const JobServiceService = { + /** Claims status for the specified job. */ + claimStatus: { + path: "/yandex.cloud.loadtesting.agent.v1.JobService/ClaimStatus", + requestStream: false, + responseStream: false, + requestSerialize: (value: ClaimJobStatusRequest) => + Buffer.from(ClaimJobStatusRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ClaimJobStatusRequest.decode(value), + responseSerialize: (value: ClaimJobStatusResponse) => + Buffer.from(ClaimJobStatusResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ClaimJobStatusResponse.decode(value), + }, + /** Returns the job for the specified agent. */ + get: { + path: "/yandex.cloud.loadtesting.agent.v1.JobService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetJobRequest) => + Buffer.from(GetJobRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetJobRequest.decode(value), + responseSerialize: (value: Job) => Buffer.from(Job.encode(value).finish()), + responseDeserialize: (value: Buffer) => Job.decode(value), + }, + /** Returns the signal for the specified job. 
*/ + getSignal: { + path: "/yandex.cloud.loadtesting.agent.v1.JobService/GetSignal", + requestStream: false, + responseStream: false, + requestSerialize: (value: JobSignalRequest) => + Buffer.from(JobSignalRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => JobSignalRequest.decode(value), + responseSerialize: (value: JobSignalResponse) => + Buffer.from(JobSignalResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => JobSignalResponse.decode(value), + }, + getTransientFile: { + path: "/yandex.cloud.loadtesting.agent.v1.JobService/GetTransientFile", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetJobTransientFile) => + Buffer.from(GetJobTransientFile.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetJobTransientFile.decode(value), + responseSerialize: (value: File) => + Buffer.from(File.encode(value).finish()), + responseDeserialize: (value: Buffer) => File.decode(value), + }, +} as const; + +export interface JobServiceServer extends UntypedServiceImplementation { + /** Claims status for the specified job. */ + claimStatus: handleUnaryCall; + /** Returns the job for the specified agent. */ + get: handleUnaryCall; + /** Returns the signal for the specified job. */ + getSignal: handleUnaryCall; + getTransientFile: handleUnaryCall; +} + +export interface JobServiceClient extends Client { + /** Claims status for the specified job. 
*/ + claimStatus( + request: ClaimJobStatusRequest, + callback: ( + error: ServiceError | null, + response: ClaimJobStatusResponse + ) => void + ): ClientUnaryCall; + claimStatus( + request: ClaimJobStatusRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ClaimJobStatusResponse + ) => void + ): ClientUnaryCall; + claimStatus( + request: ClaimJobStatusRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ClaimJobStatusResponse + ) => void + ): ClientUnaryCall; + /** Returns the job for the specified agent. */ + get( + request: GetJobRequest, + callback: (error: ServiceError | null, response: Job) => void + ): ClientUnaryCall; + get( + request: GetJobRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Job) => void + ): ClientUnaryCall; + get( + request: GetJobRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Job) => void + ): ClientUnaryCall; + /** Returns the signal for the specified job. 
*/ + getSignal( + request: JobSignalRequest, + callback: (error: ServiceError | null, response: JobSignalResponse) => void + ): ClientUnaryCall; + getSignal( + request: JobSignalRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: JobSignalResponse) => void + ): ClientUnaryCall; + getSignal( + request: JobSignalRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: JobSignalResponse) => void + ): ClientUnaryCall; + getTransientFile( + request: GetJobTransientFile, + callback: (error: ServiceError | null, response: File) => void + ): ClientUnaryCall; + getTransientFile( + request: GetJobTransientFile, + metadata: Metadata, + callback: (error: ServiceError | null, response: File) => void + ): ClientUnaryCall; + getTransientFile( + request: GetJobTransientFile, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: File) => void + ): ClientUnaryCall; +} + +export const JobServiceClient = makeGenericClientConstructor( + JobServiceService, + "yandex.cloud.loadtesting.agent.v1.JobService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): JobServiceClient; + service: typeof JobServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = 
bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/monitoring_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/monitoring_service.ts new file mode 100644 index 00000000..72f8d4bd --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/monitoring_service.ts @@ -0,0 +1,525 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface AddMetricRequest { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricRequest"; + 
computeInstanceId: string; + jobId: string; + chunks: MetricChunk[]; + agentInstanceId: string; +} + +export interface MetricChunk { + $type: "yandex.cloud.loadtesting.agent.v1.MetricChunk"; + data: Metric[]; + timestamp: number; + comment: string; + instanceHost: string; +} + +export interface Metric { + $type: "yandex.cloud.loadtesting.agent.v1.Metric"; + metricType: string; + metricName: string; + metricValue: number; +} + +export interface AddMetricResponse { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricResponse"; + metricTrailId: string; + code: number; +} + +const baseAddMetricRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricRequest", + computeInstanceId: "", + jobId: "", + agentInstanceId: "", +}; + +export const AddMetricRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricRequest" as const, + + encode( + message: AddMetricRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + if (message.jobId !== "") { + writer.uint32(26).string(message.jobId); + } + for (const v of message.chunks) { + MetricChunk.encode(v!, writer.uint32(34).fork()).ldelim(); + } + if (message.agentInstanceId !== "") { + writer.uint32(42).string(message.agentInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AddMetricRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAddMetricRequest } as AddMetricRequest; + message.chunks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 3: + message.jobId = reader.string(); + break; + case 4: + message.chunks.push(MetricChunk.decode(reader, reader.uint32())); + break; + case 5: + message.agentInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddMetricRequest { + const message = { ...baseAddMetricRequest } as AddMetricRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? String(object.jobId) + : ""; + message.chunks = (object.chunks ?? []).map((e: any) => + MetricChunk.fromJSON(e) + ); + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + return message; + }, + + toJSON(message: AddMetricRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.jobId !== undefined && (obj.jobId = message.jobId); + if (message.chunks) { + obj.chunks = message.chunks.map((e) => + e ? MetricChunk.toJSON(e) : undefined + ); + } else { + obj.chunks = []; + } + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddMetricRequest { + const message = { ...baseAddMetricRequest } as AddMetricRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.jobId = object.jobId ?? 
""; + message.chunks = + object.chunks?.map((e) => MetricChunk.fromPartial(e)) || []; + message.agentInstanceId = object.agentInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AddMetricRequest.$type, AddMetricRequest); + +const baseMetricChunk: object = { + $type: "yandex.cloud.loadtesting.agent.v1.MetricChunk", + timestamp: 0, + comment: "", + instanceHost: "", +}; + +export const MetricChunk = { + $type: "yandex.cloud.loadtesting.agent.v1.MetricChunk" as const, + + encode( + message: MetricChunk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.data) { + Metric.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.timestamp !== 0) { + writer.uint32(16).int64(message.timestamp); + } + if (message.comment !== "") { + writer.uint32(26).string(message.comment); + } + if (message.instanceHost !== "") { + writer.uint32(34).string(message.instanceHost); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MetricChunk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMetricChunk } as MetricChunk; + message.data = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data.push(Metric.decode(reader, reader.uint32())); + break; + case 2: + message.timestamp = longToNumber(reader.int64() as Long); + break; + case 3: + message.comment = reader.string(); + break; + case 4: + message.instanceHost = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MetricChunk { + const message = { ...baseMetricChunk } as MetricChunk; + message.data = (object.data ?? []).map((e: any) => Metric.fromJSON(e)); + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? 
Number(object.timestamp) + : 0; + message.comment = + object.comment !== undefined && object.comment !== null + ? String(object.comment) + : ""; + message.instanceHost = + object.instanceHost !== undefined && object.instanceHost !== null + ? String(object.instanceHost) + : ""; + return message; + }, + + toJSON(message: MetricChunk): unknown { + const obj: any = {}; + if (message.data) { + obj.data = message.data.map((e) => (e ? Metric.toJSON(e) : undefined)); + } else { + obj.data = []; + } + message.timestamp !== undefined && + (obj.timestamp = Math.round(message.timestamp)); + message.comment !== undefined && (obj.comment = message.comment); + message.instanceHost !== undefined && + (obj.instanceHost = message.instanceHost); + return obj; + }, + + fromPartial, I>>( + object: I + ): MetricChunk { + const message = { ...baseMetricChunk } as MetricChunk; + message.data = object.data?.map((e) => Metric.fromPartial(e)) || []; + message.timestamp = object.timestamp ?? 0; + message.comment = object.comment ?? ""; + message.instanceHost = object.instanceHost ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MetricChunk.$type, MetricChunk); + +const baseMetric: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Metric", + metricType: "", + metricName: "", + metricValue: 0, +}; + +export const Metric = { + $type: "yandex.cloud.loadtesting.agent.v1.Metric" as const, + + encode( + message: Metric, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.metricType !== "") { + writer.uint32(10).string(message.metricType); + } + if (message.metricName !== "") { + writer.uint32(18).string(message.metricName); + } + if (message.metricValue !== 0) { + writer.uint32(25).double(message.metricValue); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Metric { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMetric } as Metric; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.metricType = reader.string(); + break; + case 2: + message.metricName = reader.string(); + break; + case 3: + message.metricValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Metric { + const message = { ...baseMetric } as Metric; + message.metricType = + object.metricType !== undefined && object.metricType !== null + ? String(object.metricType) + : ""; + message.metricName = + object.metricName !== undefined && object.metricName !== null + ? String(object.metricName) + : ""; + message.metricValue = + object.metricValue !== undefined && object.metricValue !== null + ? Number(object.metricValue) + : 0; + return message; + }, + + toJSON(message: Metric): unknown { + const obj: any = {}; + message.metricType !== undefined && (obj.metricType = message.metricType); + message.metricName !== undefined && (obj.metricName = message.metricName); + message.metricValue !== undefined && + (obj.metricValue = message.metricValue); + return obj; + }, + + fromPartial, I>>(object: I): Metric { + const message = { ...baseMetric } as Metric; + message.metricType = object.metricType ?? ""; + message.metricName = object.metricName ?? ""; + message.metricValue = object.metricValue ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Metric.$type, Metric); + +const baseAddMetricResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricResponse", + metricTrailId: "", + code: 0, +}; + +export const AddMetricResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.AddMetricResponse" as const, + + encode( + message: AddMetricResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.metricTrailId !== "") { + writer.uint32(10).string(message.metricTrailId); + } + if (message.code !== 0) { + writer.uint32(16).int64(message.code); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AddMetricResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAddMetricResponse } as AddMetricResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.metricTrailId = reader.string(); + break; + case 2: + message.code = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddMetricResponse { + const message = { ...baseAddMetricResponse } as AddMetricResponse; + message.metricTrailId = + object.metricTrailId !== undefined && object.metricTrailId !== null + ? String(object.metricTrailId) + : ""; + message.code = + object.code !== undefined && object.code !== null + ? 
Number(object.code) + : 0; + return message; + }, + + toJSON(message: AddMetricResponse): unknown { + const obj: any = {}; + message.metricTrailId !== undefined && + (obj.metricTrailId = message.metricTrailId); + message.code !== undefined && (obj.code = Math.round(message.code)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddMetricResponse { + const message = { ...baseAddMetricResponse } as AddMetricResponse; + message.metricTrailId = object.metricTrailId ?? ""; + message.code = object.code ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(AddMetricResponse.$type, AddMetricResponse); + +export const MonitoringServiceService = { + /** Saves monitoring events for specified job */ + addMetric: { + path: "/yandex.cloud.loadtesting.agent.v1.MonitoringService/AddMetric", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddMetricRequest) => + Buffer.from(AddMetricRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => AddMetricRequest.decode(value), + responseSerialize: (value: AddMetricResponse) => + Buffer.from(AddMetricResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => AddMetricResponse.decode(value), + }, +} as const; + +export interface MonitoringServiceServer extends UntypedServiceImplementation { + /** Saves monitoring events for specified job */ + addMetric: handleUnaryCall; +} + +export interface MonitoringServiceClient extends Client { + /** Saves monitoring events for specified job */ + addMetric( + request: AddMetricRequest, + callback: (error: ServiceError | null, response: AddMetricResponse) => void + ): ClientUnaryCall; + addMetric( + request: AddMetricRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: AddMetricResponse) => void + ): ClientUnaryCall; + addMetric( + request: AddMetricRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: AddMetricResponse) => void + ): ClientUnaryCall; 
+} + +export const MonitoringServiceClient = makeGenericClientConstructor( + MonitoringServiceService, + "yandex.cloud.loadtesting.agent.v1.MonitoringService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): MonitoringServiceClient; + service: typeof MonitoringServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/test.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/test.ts new file mode 100644 index 00000000..3fb2d0a7 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/test.ts @@ -0,0 +1,1144 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +/** + * Ammo type for generators: phantom.ammo_type, + * ammo.type in pandora.config_content.pool + */ +export enum AmmoType { + AMMO_TYPE_UNSPECIFIED = 0, + HTTP_JSON = 1, + RAW = 2, + URI = 3, + URIPOST = 4, + PHANTOM = 5, + ACCESS = 6, + UNRECOGNIZED = -1, +} + +export function ammoTypeFromJSON(object: any): AmmoType { + switch (object) { + case 0: + case "AMMO_TYPE_UNSPECIFIED": + return AmmoType.AMMO_TYPE_UNSPECIFIED; + case 1: + case "HTTP_JSON": + return AmmoType.HTTP_JSON; + case 2: + case "RAW": + return AmmoType.RAW; + case 3: + case "URI": + return AmmoType.URI; + case 4: + case "URIPOST": + return AmmoType.URIPOST; + case 5: + case "PHANTOM": + return AmmoType.PHANTOM; + case 6: + case "ACCESS": + return AmmoType.ACCESS; + case -1: + case "UNRECOGNIZED": + default: + return AmmoType.UNRECOGNIZED; + } +} + +export function ammoTypeToJSON(object: AmmoType): string { + switch (object) { + case AmmoType.AMMO_TYPE_UNSPECIFIED: + return "AMMO_TYPE_UNSPECIFIED"; + case AmmoType.HTTP_JSON: + return "HTTP_JSON"; + case AmmoType.RAW: + return 
"RAW"; + case AmmoType.URI: + return "URI"; + case AmmoType.URIPOST: + return "URIPOST"; + case AmmoType.PHANTOM: + return "PHANTOM"; + case AmmoType.ACCESS: + return "ACCESS"; + default: + return "UNKNOWN"; + } +} + +/** Load type: phantom.load_profile.load_type, */ +export enum LoadType { + LOAD_TYPE_UNSPECIFIED = 0, + RPS = 1, + INSTANCES = 2, + STPD_FILE = 3, + UNRECOGNIZED = -1, +} + +export function loadTypeFromJSON(object: any): LoadType { + switch (object) { + case 0: + case "LOAD_TYPE_UNSPECIFIED": + return LoadType.LOAD_TYPE_UNSPECIFIED; + case 1: + case "RPS": + return LoadType.RPS; + case 2: + case "INSTANCES": + return LoadType.INSTANCES; + case 3: + case "STPD_FILE": + return LoadType.STPD_FILE; + case -1: + case "UNRECOGNIZED": + default: + return LoadType.UNRECOGNIZED; + } +} + +export function loadTypeToJSON(object: LoadType): string { + switch (object) { + case LoadType.LOAD_TYPE_UNSPECIFIED: + return "LOAD_TYPE_UNSPECIFIED"; + case LoadType.RPS: + return "RPS"; + case LoadType.INSTANCES: + return "INSTANCES"; + case LoadType.STPD_FILE: + return "STPD_FILE"; + default: + return "UNKNOWN"; + } +} + +/** + * Schedule type: phantom.load_profile.schedule, + * rps.schedule.type in pandora.config_content.pool + */ +export enum ScheduleType { + SCHEDULE_TYPE_UNSPECIFIED = 0, + ONCE = 1, + CONST = 2, + LINE = 3, + STEP = 4, + UNRECOGNIZED = -1, +} + +export function scheduleTypeFromJSON(object: any): ScheduleType { + switch (object) { + case 0: + case "SCHEDULE_TYPE_UNSPECIFIED": + return ScheduleType.SCHEDULE_TYPE_UNSPECIFIED; + case 1: + case "ONCE": + return ScheduleType.ONCE; + case 2: + case "CONST": + return ScheduleType.CONST; + case 3: + case "LINE": + return ScheduleType.LINE; + case 4: + case "STEP": + return ScheduleType.STEP; + case -1: + case "UNRECOGNIZED": + default: + return ScheduleType.UNRECOGNIZED; + } +} + +export function scheduleTypeToJSON(object: ScheduleType): string { + switch (object) { + case 
ScheduleType.SCHEDULE_TYPE_UNSPECIFIED: + return "SCHEDULE_TYPE_UNSPECIFIED"; + case ScheduleType.ONCE: + return "ONCE"; + case ScheduleType.CONST: + return "CONST"; + case ScheduleType.LINE: + return "LINE"; + case ScheduleType.STEP: + return "STEP"; + default: + return "UNKNOWN"; + } +} + +export interface Test { + $type: "yandex.cloud.loadtesting.agent.v1.Test"; + id: string; + folderId: string; + name: string; + description: string; + labels: { [key: string]: string }; + createdAt?: Date; + startedAt?: Date; + finishedAt?: Date; + updatedAt?: Date; + generator: Test_Generator; + /** AgentInstance ID where Test is running. */ + agentInstanceId: string; + /** Target VM. */ + targetAddress: string; + targetPort: number; + /** Version of object under test. */ + targetVersion: string; + /** Test details */ + config: string; + ammoUrls: string | undefined; + ammoId: string | undefined; + cases: string[]; + status: Test_Status; + errors: string[]; + favorite: boolean; +} + +export enum Test_Status { + STATUS_UNSPECIFIED = 0, + CREATED = 1, + INITIATED = 2, + PREPARING = 3, + RUNNING = 4, + FINISHING = 5, + DONE = 6, + POST_PROCESSING = 7, + FAILED = 8, + STOPPING = 9, + STOPPED = 10, + AUTOSTOPPED = 11, + WAITING = 12, + DELETING = 13, + UNRECOGNIZED = -1, +} + +export function test_StatusFromJSON(object: any): Test_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return Test_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATED": + return Test_Status.CREATED; + case 2: + case "INITIATED": + return Test_Status.INITIATED; + case 3: + case "PREPARING": + return Test_Status.PREPARING; + case 4: + case "RUNNING": + return Test_Status.RUNNING; + case 5: + case "FINISHING": + return Test_Status.FINISHING; + case 6: + case "DONE": + return Test_Status.DONE; + case 7: + case "POST_PROCESSING": + return Test_Status.POST_PROCESSING; + case 8: + case "FAILED": + return Test_Status.FAILED; + case 9: + case "STOPPING": + return Test_Status.STOPPING; + case 10: 
+ case "STOPPED": + return Test_Status.STOPPED; + case 11: + case "AUTOSTOPPED": + return Test_Status.AUTOSTOPPED; + case 12: + case "WAITING": + return Test_Status.WAITING; + case 13: + case "DELETING": + return Test_Status.DELETING; + case -1: + case "UNRECOGNIZED": + default: + return Test_Status.UNRECOGNIZED; + } +} + +export function test_StatusToJSON(object: Test_Status): string { + switch (object) { + case Test_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case Test_Status.CREATED: + return "CREATED"; + case Test_Status.INITIATED: + return "INITIATED"; + case Test_Status.PREPARING: + return "PREPARING"; + case Test_Status.RUNNING: + return "RUNNING"; + case Test_Status.FINISHING: + return "FINISHING"; + case Test_Status.DONE: + return "DONE"; + case Test_Status.POST_PROCESSING: + return "POST_PROCESSING"; + case Test_Status.FAILED: + return "FAILED"; + case Test_Status.STOPPING: + return "STOPPING"; + case Test_Status.STOPPED: + return "STOPPED"; + case Test_Status.AUTOSTOPPED: + return "AUTOSTOPPED"; + case Test_Status.WAITING: + return "WAITING"; + case Test_Status.DELETING: + return "DELETING"; + default: + return "UNKNOWN"; + } +} + +export enum Test_Generator { + GENERATOR_UNSPECIFIED = 0, + PANDORA = 1, + PHANTOM = 2, + JMETER = 3, + UNRECOGNIZED = -1, +} + +export function test_GeneratorFromJSON(object: any): Test_Generator { + switch (object) { + case 0: + case "GENERATOR_UNSPECIFIED": + return Test_Generator.GENERATOR_UNSPECIFIED; + case 1: + case "PANDORA": + return Test_Generator.PANDORA; + case 2: + case "PHANTOM": + return Test_Generator.PHANTOM; + case 3: + case "JMETER": + return Test_Generator.JMETER; + case -1: + case "UNRECOGNIZED": + default: + return Test_Generator.UNRECOGNIZED; + } +} + +export function test_GeneratorToJSON(object: Test_Generator): string { + switch (object) { + case Test_Generator.GENERATOR_UNSPECIFIED: + return "GENERATOR_UNSPECIFIED"; + case Test_Generator.PANDORA: + return "PANDORA"; + case 
Test_Generator.PHANTOM: + return "PHANTOM"; + case Test_Generator.JMETER: + return "JMETER"; + default: + return "UNKNOWN"; + } +} + +export interface Test_LabelsEntry { + $type: "yandex.cloud.loadtesting.agent.v1.Test.LabelsEntry"; + key: string; + value: string; +} + +/** + * Load schedule string: + * {duration: 180s, type: line, from: 1, to: 10000} for pandora, + * step(25, 5, 5, 60) for phantom. + */ +export interface LoadSchedule { + $type: "yandex.cloud.loadtesting.agent.v1.LoadSchedule"; + type: ScheduleType; + /** times for pandora */ + instances: number; + duration: string; + rpsFrom: number; + rpsTo: number; + step: number; + ops: number; + stpdPath: string; +} + +/** Load profile in general: phantom.load_profile, pool schedule for pandora */ +export interface Schedule { + $type: "yandex.cloud.loadtesting.agent.v1.Schedule"; + loadType: LoadType; + loadSchedule: LoadSchedule[]; + loadProfile: string[]; +} + +const baseTest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Test", + id: "", + folderId: "", + name: "", + description: "", + generator: 0, + agentInstanceId: "", + targetAddress: "", + targetPort: 0, + targetVersion: "", + config: "", + cases: "", + status: 0, + errors: "", + favorite: false, +}; + +export const Test = { + $type: "yandex.cloud.loadtesting.agent.v1.Test" as const, + + encode(message: Test, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Test_LabelsEntry.encode( + { + $type: "yandex.cloud.loadtesting.agent.v1.Test.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.createdAt !== 
undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(50).fork() + ).ldelim(); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.finishedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.finishedAt), + writer.uint32(66).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.generator !== 0) { + writer.uint32(80).int32(message.generator); + } + if (message.agentInstanceId !== "") { + writer.uint32(90).string(message.agentInstanceId); + } + if (message.targetAddress !== "") { + writer.uint32(98).string(message.targetAddress); + } + if (message.targetPort !== 0) { + writer.uint32(104).int64(message.targetPort); + } + if (message.targetVersion !== "") { + writer.uint32(114).string(message.targetVersion); + } + if (message.config !== "") { + writer.uint32(122).string(message.config); + } + if (message.ammoUrls !== undefined) { + writer.uint32(130).string(message.ammoUrls); + } + if (message.ammoId !== undefined) { + writer.uint32(138).string(message.ammoId); + } + for (const v of message.cases) { + writer.uint32(146).string(v!); + } + if (message.status !== 0) { + writer.uint32(152).int32(message.status); + } + for (const v of message.errors) { + writer.uint32(162).string(v!); + } + if (message.favorite === true) { + writer.uint32(168).bool(message.favorite); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Test { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseTest } as Test; + message.labels = {}; + message.cases = []; + message.errors = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = Test_LabelsEntry.decode(reader, reader.uint32()); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 7: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.finishedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 9: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.generator = reader.int32() as any; + break; + case 11: + message.agentInstanceId = reader.string(); + break; + case 12: + message.targetAddress = reader.string(); + break; + case 13: + message.targetPort = longToNumber(reader.int64() as Long); + break; + case 14: + message.targetVersion = reader.string(); + break; + case 15: + message.config = reader.string(); + break; + case 16: + message.ammoUrls = reader.string(); + break; + case 17: + message.ammoId = reader.string(); + break; + case 18: + message.cases.push(reader.string()); + break; + case 19: + message.status = reader.int32() as any; + break; + case 20: + message.errors.push(reader.string()); + break; + case 21: + message.favorite = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Test { + const message = { ...baseTest } as Test; + message.id = + object.id !== undefined && object.id !== 
null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.finishedAt = + object.finishedAt !== undefined && object.finishedAt !== null + ? fromJsonTimestamp(object.finishedAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.generator = + object.generator !== undefined && object.generator !== null + ? test_GeneratorFromJSON(object.generator) + : 0; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + message.targetAddress = + object.targetAddress !== undefined && object.targetAddress !== null + ? String(object.targetAddress) + : ""; + message.targetPort = + object.targetPort !== undefined && object.targetPort !== null + ? Number(object.targetPort) + : 0; + message.targetVersion = + object.targetVersion !== undefined && object.targetVersion !== null + ? String(object.targetVersion) + : ""; + message.config = + object.config !== undefined && object.config !== null + ? String(object.config) + : ""; + message.ammoUrls = + object.ammoUrls !== undefined && object.ammoUrls !== null + ? 
String(object.ammoUrls) + : undefined; + message.ammoId = + object.ammoId !== undefined && object.ammoId !== null + ? String(object.ammoId) + : undefined; + message.cases = (object.cases ?? []).map((e: any) => String(e)); + message.status = + object.status !== undefined && object.status !== null + ? test_StatusFromJSON(object.status) + : 0; + message.errors = (object.errors ?? []).map((e: any) => String(e)); + message.favorite = + object.favorite !== undefined && object.favorite !== null + ? Boolean(object.favorite) + : false; + return message; + }, + + toJSON(message: Test): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.finishedAt !== undefined && + (obj.finishedAt = message.finishedAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.generator !== undefined && + (obj.generator = test_GeneratorToJSON(message.generator)); + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + message.targetAddress !== undefined && + (obj.targetAddress = message.targetAddress); + message.targetPort !== undefined && + (obj.targetPort = Math.round(message.targetPort)); + message.targetVersion !== undefined && + (obj.targetVersion = message.targetVersion); + message.config !== undefined && (obj.config = message.config); + message.ammoUrls !== undefined && (obj.ammoUrls = message.ammoUrls); + message.ammoId !== undefined 
&& (obj.ammoId = message.ammoId); + if (message.cases) { + obj.cases = message.cases.map((e) => e); + } else { + obj.cases = []; + } + message.status !== undefined && + (obj.status = test_StatusToJSON(message.status)); + if (message.errors) { + obj.errors = message.errors.map((e) => e); + } else { + obj.errors = []; + } + message.favorite !== undefined && (obj.favorite = message.favorite); + return obj; + }, + + fromPartial, I>>(object: I): Test { + const message = { ...baseTest } as Test; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.createdAt = object.createdAt ?? undefined; + message.startedAt = object.startedAt ?? undefined; + message.finishedAt = object.finishedAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.generator = object.generator ?? 0; + message.agentInstanceId = object.agentInstanceId ?? ""; + message.targetAddress = object.targetAddress ?? ""; + message.targetPort = object.targetPort ?? 0; + message.targetVersion = object.targetVersion ?? ""; + message.config = object.config ?? ""; + message.ammoUrls = object.ammoUrls ?? undefined; + message.ammoId = object.ammoId ?? undefined; + message.cases = object.cases?.map((e) => e) || []; + message.status = object.status ?? 0; + message.errors = object.errors?.map((e) => e) || []; + message.favorite = object.favorite ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(Test.$type, Test); + +const baseTest_LabelsEntry: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Test.LabelsEntry", + key: "", + value: "", +}; + +export const Test_LabelsEntry = { + $type: "yandex.cloud.loadtesting.agent.v1.Test.LabelsEntry" as const, + + encode( + message: Test_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Test_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTest_LabelsEntry } as Test_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Test_LabelsEntry { + const message = { ...baseTest_LabelsEntry } as Test_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Test_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Test_LabelsEntry { + const message = { ...baseTest_LabelsEntry } as Test_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Test_LabelsEntry.$type, Test_LabelsEntry); + +const baseLoadSchedule: object = { + $type: "yandex.cloud.loadtesting.agent.v1.LoadSchedule", + type: 0, + instances: 0, + duration: "", + rpsFrom: 0, + rpsTo: 0, + step: 0, + ops: 0, + stpdPath: "", +}; + +export const LoadSchedule = { + $type: "yandex.cloud.loadtesting.agent.v1.LoadSchedule" as const, + + encode( + message: LoadSchedule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.instances !== 0) { + writer.uint32(16).int64(message.instances); + } + if (message.duration !== "") { + writer.uint32(26).string(message.duration); + } + if (message.rpsFrom !== 0) { + writer.uint32(32).int64(message.rpsFrom); + } + if (message.rpsTo !== 0) { + writer.uint32(40).int64(message.rpsTo); + } + if (message.step !== 0) { + writer.uint32(48).int64(message.step); + } + if (message.ops !== 0) { + writer.uint32(56).int64(message.ops); + } + if (message.stpdPath !== "") { + writer.uint32(66).string(message.stpdPath); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LoadSchedule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLoadSchedule } as LoadSchedule; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.instances = longToNumber(reader.int64() as Long); + break; + case 3: + message.duration = reader.string(); + break; + case 4: + message.rpsFrom = longToNumber(reader.int64() as Long); + break; + case 5: + message.rpsTo = longToNumber(reader.int64() as Long); + break; + case 6: + message.step = longToNumber(reader.int64() as Long); + break; + case 7: + message.ops = longToNumber(reader.int64() as Long); + break; + case 8: + message.stpdPath = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LoadSchedule { + const message = { ...baseLoadSchedule } as LoadSchedule; + message.type = + object.type !== undefined && object.type !== null + ? scheduleTypeFromJSON(object.type) + : 0; + message.instances = + object.instances !== undefined && object.instances !== null + ? Number(object.instances) + : 0; + message.duration = + object.duration !== undefined && object.duration !== null + ? String(object.duration) + : ""; + message.rpsFrom = + object.rpsFrom !== undefined && object.rpsFrom !== null + ? Number(object.rpsFrom) + : 0; + message.rpsTo = + object.rpsTo !== undefined && object.rpsTo !== null + ? Number(object.rpsTo) + : 0; + message.step = + object.step !== undefined && object.step !== null + ? Number(object.step) + : 0; + message.ops = + object.ops !== undefined && object.ops !== null ? Number(object.ops) : 0; + message.stpdPath = + object.stpdPath !== undefined && object.stpdPath !== null + ? 
String(object.stpdPath) + : ""; + return message; + }, + + toJSON(message: LoadSchedule): unknown { + const obj: any = {}; + message.type !== undefined && (obj.type = scheduleTypeToJSON(message.type)); + message.instances !== undefined && + (obj.instances = Math.round(message.instances)); + message.duration !== undefined && (obj.duration = message.duration); + message.rpsFrom !== undefined && + (obj.rpsFrom = Math.round(message.rpsFrom)); + message.rpsTo !== undefined && (obj.rpsTo = Math.round(message.rpsTo)); + message.step !== undefined && (obj.step = Math.round(message.step)); + message.ops !== undefined && (obj.ops = Math.round(message.ops)); + message.stpdPath !== undefined && (obj.stpdPath = message.stpdPath); + return obj; + }, + + fromPartial, I>>( + object: I + ): LoadSchedule { + const message = { ...baseLoadSchedule } as LoadSchedule; + message.type = object.type ?? 0; + message.instances = object.instances ?? 0; + message.duration = object.duration ?? ""; + message.rpsFrom = object.rpsFrom ?? 0; + message.rpsTo = object.rpsTo ?? 0; + message.step = object.step ?? 0; + message.ops = object.ops ?? 0; + message.stpdPath = object.stpdPath ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LoadSchedule.$type, LoadSchedule); + +const baseSchedule: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Schedule", + loadType: 0, + loadProfile: "", +}; + +export const Schedule = { + $type: "yandex.cloud.loadtesting.agent.v1.Schedule" as const, + + encode( + message: Schedule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.loadType !== 0) { + writer.uint32(8).int32(message.loadType); + } + for (const v of message.loadSchedule) { + LoadSchedule.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.loadProfile) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Schedule { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSchedule } as Schedule; + message.loadSchedule = []; + message.loadProfile = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.loadType = reader.int32() as any; + break; + case 2: + message.loadSchedule.push( + LoadSchedule.decode(reader, reader.uint32()) + ); + break; + case 3: + message.loadProfile.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Schedule { + const message = { ...baseSchedule } as Schedule; + message.loadType = + object.loadType !== undefined && object.loadType !== null + ? loadTypeFromJSON(object.loadType) + : 0; + message.loadSchedule = (object.loadSchedule ?? []).map((e: any) => + LoadSchedule.fromJSON(e) + ); + message.loadProfile = (object.loadProfile ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: Schedule): unknown { + const obj: any = {}; + message.loadType !== undefined && + (obj.loadType = loadTypeToJSON(message.loadType)); + if (message.loadSchedule) { + obj.loadSchedule = message.loadSchedule.map((e) => + e ? LoadSchedule.toJSON(e) : undefined + ); + } else { + obj.loadSchedule = []; + } + if (message.loadProfile) { + obj.loadProfile = message.loadProfile.map((e) => e); + } else { + obj.loadProfile = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Schedule { + const message = { ...baseSchedule } as Schedule; + message.loadType = object.loadType ?? 
0; + message.loadSchedule = + object.loadSchedule?.map((e) => LoadSchedule.fromPartial(e)) || []; + message.loadProfile = object.loadProfile?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Schedule.$type, Schedule); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/test_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/test_service.ts new file mode 100644 index 00000000..61d8b234 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/test_service.ts @@ -0,0 +1,1193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + Test_Generator, + Schedule, + AmmoType, + Test, + test_GeneratorFromJSON, + ammoTypeFromJSON, + test_GeneratorToJSON, + ammoTypeToJSON, +} from "../../../../../yandex/cloud/loadtesting/agent/v1/test"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const 
protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface GetTestRequest { + $type: "yandex.cloud.loadtesting.agent.v1.GetTestRequest"; + testId: string; +} + +export interface CreateTestRequest { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestRequest"; + folderId: string; + name: string; + description: string; + labels: { [key: string]: string }; + generator: Test_Generator; + agentInstanceId: string; + /** + * Fields for TestConfig creation. These fields have the higher priority than yaml config. + * These fields are taken from Form + */ + targetAddress: string; + targetPort: number; + targetVersion: string; + instances: number; + loadSchedule?: Schedule; + config: string; + ammoId: string; + ammoUrls: string[]; + ammoHeaders: string[]; + ammoType: AmmoType; + ssl: boolean; + imbalancePoint: number; + imbalanceTs: number; + loggingLogGroupId: string; +} + +export interface CreateTestRequest_LabelsEntry { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateTestMetadata { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestMetadata"; + testId: string; +} + +export interface UpdateTestRequest { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest"; + testId: string; + updateMask?: FieldMask; + name: string; + description: string; + labels: { [key: string]: string }; + favorite: boolean; + targetVersion: string; + imbalancePoint: number; + imbalanceTs: number; + imbalanceComment: string; +} + +export interface UpdateTestRequest_LabelsEntry { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateTestMetadata { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestMetadata"; + testId: string; +} + +const baseGetTestRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.GetTestRequest", + testId: "", +}; + +export const GetTestRequest = { + $type: 
"yandex.cloud.loadtesting.agent.v1.GetTestRequest" as const, + + encode( + message: GetTestRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.testId !== "") { + writer.uint32(10).string(message.testId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetTestRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetTestRequest } as GetTestRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.testId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetTestRequest { + const message = { ...baseGetTestRequest } as GetTestRequest; + message.testId = + object.testId !== undefined && object.testId !== null + ? String(object.testId) + : ""; + return message; + }, + + toJSON(message: GetTestRequest): unknown { + const obj: any = {}; + message.testId !== undefined && (obj.testId = message.testId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetTestRequest { + const message = { ...baseGetTestRequest } as GetTestRequest; + message.testId = object.testId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetTestRequest.$type, GetTestRequest); + +const baseCreateTestRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestRequest", + folderId: "", + name: "", + description: "", + generator: 0, + agentInstanceId: "", + targetAddress: "", + targetPort: 0, + targetVersion: "", + instances: 0, + config: "", + ammoId: "", + ammoUrls: "", + ammoHeaders: "", + ammoType: 0, + ssl: false, + imbalancePoint: 0, + imbalanceTs: 0, + loggingLogGroupId: "", +}; + +export const CreateTestRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestRequest" as const, + + encode( + message: CreateTestRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateTestRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.loadtesting.agent.v1.CreateTestRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.generator !== 0) { + writer.uint32(40).int32(message.generator); + } + if (message.agentInstanceId !== "") { + writer.uint32(50).string(message.agentInstanceId); + } + if (message.targetAddress !== "") { + writer.uint32(58).string(message.targetAddress); + } + if (message.targetPort !== 0) { + writer.uint32(64).int64(message.targetPort); + } + if (message.targetVersion !== "") { + writer.uint32(74).string(message.targetVersion); + } + if (message.instances !== 0) { + writer.uint32(80).int64(message.instances); + } + if (message.loadSchedule !== undefined) { + Schedule.encode(message.loadSchedule, writer.uint32(90).fork()).ldelim(); + } + if (message.config !== "") { + writer.uint32(98).string(message.config); + } + if 
(message.ammoId !== "") { + writer.uint32(106).string(message.ammoId); + } + for (const v of message.ammoUrls) { + writer.uint32(114).string(v!); + } + for (const v of message.ammoHeaders) { + writer.uint32(122).string(v!); + } + if (message.ammoType !== 0) { + writer.uint32(128).int32(message.ammoType); + } + if (message.ssl === true) { + writer.uint32(136).bool(message.ssl); + } + if (message.imbalancePoint !== 0) { + writer.uint32(144).int64(message.imbalancePoint); + } + if (message.imbalanceTs !== 0) { + writer.uint32(152).int64(message.imbalanceTs); + } + if (message.loggingLogGroupId !== "") { + writer.uint32(162).string(message.loggingLogGroupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateTestRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateTestRequest } as CreateTestRequest; + message.labels = {}; + message.ammoUrls = []; + message.ammoHeaders = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateTestRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.generator = reader.int32() as any; + break; + case 6: + message.agentInstanceId = reader.string(); + break; + case 7: + message.targetAddress = reader.string(); + break; + case 8: + message.targetPort = longToNumber(reader.int64() as Long); + break; + case 9: + message.targetVersion = reader.string(); + break; + case 10: + message.instances = longToNumber(reader.int64() as Long); + break; + case 11: + message.loadSchedule = Schedule.decode(reader, reader.uint32()); + 
break; + case 12: + message.config = reader.string(); + break; + case 13: + message.ammoId = reader.string(); + break; + case 14: + message.ammoUrls.push(reader.string()); + break; + case 15: + message.ammoHeaders.push(reader.string()); + break; + case 16: + message.ammoType = reader.int32() as any; + break; + case 17: + message.ssl = reader.bool(); + break; + case 18: + message.imbalancePoint = longToNumber(reader.int64() as Long); + break; + case 19: + message.imbalanceTs = longToNumber(reader.int64() as Long); + break; + case 20: + message.loggingLogGroupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateTestRequest { + const message = { ...baseCreateTestRequest } as CreateTestRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.generator = + object.generator !== undefined && object.generator !== null + ? test_GeneratorFromJSON(object.generator) + : 0; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + message.targetAddress = + object.targetAddress !== undefined && object.targetAddress !== null + ? String(object.targetAddress) + : ""; + message.targetPort = + object.targetPort !== undefined && object.targetPort !== null + ? Number(object.targetPort) + : 0; + message.targetVersion = + object.targetVersion !== undefined && object.targetVersion !== null + ? 
String(object.targetVersion) + : ""; + message.instances = + object.instances !== undefined && object.instances !== null + ? Number(object.instances) + : 0; + message.loadSchedule = + object.loadSchedule !== undefined && object.loadSchedule !== null + ? Schedule.fromJSON(object.loadSchedule) + : undefined; + message.config = + object.config !== undefined && object.config !== null + ? String(object.config) + : ""; + message.ammoId = + object.ammoId !== undefined && object.ammoId !== null + ? String(object.ammoId) + : ""; + message.ammoUrls = (object.ammoUrls ?? []).map((e: any) => String(e)); + message.ammoHeaders = (object.ammoHeaders ?? []).map((e: any) => String(e)); + message.ammoType = + object.ammoType !== undefined && object.ammoType !== null + ? ammoTypeFromJSON(object.ammoType) + : 0; + message.ssl = + object.ssl !== undefined && object.ssl !== null + ? Boolean(object.ssl) + : false; + message.imbalancePoint = + object.imbalancePoint !== undefined && object.imbalancePoint !== null + ? Number(object.imbalancePoint) + : 0; + message.imbalanceTs = + object.imbalanceTs !== undefined && object.imbalanceTs !== null + ? Number(object.imbalanceTs) + : 0; + message.loggingLogGroupId = + object.loggingLogGroupId !== undefined && + object.loggingLogGroupId !== null + ? 
String(object.loggingLogGroupId) + : ""; + return message; + }, + + toJSON(message: CreateTestRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.generator !== undefined && + (obj.generator = test_GeneratorToJSON(message.generator)); + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + message.targetAddress !== undefined && + (obj.targetAddress = message.targetAddress); + message.targetPort !== undefined && + (obj.targetPort = Math.round(message.targetPort)); + message.targetVersion !== undefined && + (obj.targetVersion = message.targetVersion); + message.instances !== undefined && + (obj.instances = Math.round(message.instances)); + message.loadSchedule !== undefined && + (obj.loadSchedule = message.loadSchedule + ? 
Schedule.toJSON(message.loadSchedule) + : undefined); + message.config !== undefined && (obj.config = message.config); + message.ammoId !== undefined && (obj.ammoId = message.ammoId); + if (message.ammoUrls) { + obj.ammoUrls = message.ammoUrls.map((e) => e); + } else { + obj.ammoUrls = []; + } + if (message.ammoHeaders) { + obj.ammoHeaders = message.ammoHeaders.map((e) => e); + } else { + obj.ammoHeaders = []; + } + message.ammoType !== undefined && + (obj.ammoType = ammoTypeToJSON(message.ammoType)); + message.ssl !== undefined && (obj.ssl = message.ssl); + message.imbalancePoint !== undefined && + (obj.imbalancePoint = Math.round(message.imbalancePoint)); + message.imbalanceTs !== undefined && + (obj.imbalanceTs = Math.round(message.imbalanceTs)); + message.loggingLogGroupId !== undefined && + (obj.loggingLogGroupId = message.loggingLogGroupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateTestRequest { + const message = { ...baseCreateTestRequest } as CreateTestRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.generator = object.generator ?? 0; + message.agentInstanceId = object.agentInstanceId ?? ""; + message.targetAddress = object.targetAddress ?? ""; + message.targetPort = object.targetPort ?? 0; + message.targetVersion = object.targetVersion ?? ""; + message.instances = object.instances ?? 0; + message.loadSchedule = + object.loadSchedule !== undefined && object.loadSchedule !== null + ? Schedule.fromPartial(object.loadSchedule) + : undefined; + message.config = object.config ?? ""; + message.ammoId = object.ammoId ?? 
""; + message.ammoUrls = object.ammoUrls?.map((e) => e) || []; + message.ammoHeaders = object.ammoHeaders?.map((e) => e) || []; + message.ammoType = object.ammoType ?? 0; + message.ssl = object.ssl ?? false; + message.imbalancePoint = object.imbalancePoint ?? 0; + message.imbalanceTs = object.imbalanceTs ?? 0; + message.loggingLogGroupId = object.loggingLogGroupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateTestRequest.$type, CreateTestRequest); + +const baseCreateTestRequest_LabelsEntry: object = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateTestRequest_LabelsEntry = { + $type: + "yandex.cloud.loadtesting.agent.v1.CreateTestRequest.LabelsEntry" as const, + + encode( + message: CreateTestRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateTestRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateTestRequest_LabelsEntry, + } as CreateTestRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateTestRequest_LabelsEntry { + const message = { + ...baseCreateTestRequest_LabelsEntry, + } as CreateTestRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateTestRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateTestRequest_LabelsEntry { + const message = { + ...baseCreateTestRequest_LabelsEntry, + } as CreateTestRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateTestRequest_LabelsEntry.$type, + CreateTestRequest_LabelsEntry +); + +const baseCreateTestMetadata: object = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestMetadata", + testId: "", +}; + +export const CreateTestMetadata = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTestMetadata" as const, + + encode( + message: CreateTestMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.testId !== "") { + writer.uint32(10).string(message.testId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateTestMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateTestMetadata } as CreateTestMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.testId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateTestMetadata { + const message = { ...baseCreateTestMetadata } as CreateTestMetadata; + message.testId = + object.testId !== undefined && object.testId !== null + ? 
String(object.testId) + : ""; + return message; + }, + + toJSON(message: CreateTestMetadata): unknown { + const obj: any = {}; + message.testId !== undefined && (obj.testId = message.testId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateTestMetadata { + const message = { ...baseCreateTestMetadata } as CreateTestMetadata; + message.testId = object.testId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateTestMetadata.$type, CreateTestMetadata); + +const baseUpdateTestRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest", + testId: "", + name: "", + description: "", + favorite: false, + targetVersion: "", + imbalancePoint: 0, + imbalanceTs: 0, + imbalanceComment: "", +}; + +export const UpdateTestRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest" as const, + + encode( + message: UpdateTestRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.testId !== "") { + writer.uint32(10).string(message.testId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateTestRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.favorite === true) { + writer.uint32(48).bool(message.favorite); + } + if (message.targetVersion !== "") { + writer.uint32(58).string(message.targetVersion); + } + if (message.imbalancePoint !== 0) { + writer.uint32(64).int64(message.imbalancePoint); + } + if (message.imbalanceTs !== 0) { + writer.uint32(72).int64(message.imbalanceTs); + } + if (message.imbalanceComment !== "") { + 
writer.uint32(82).string(message.imbalanceComment); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateTestRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateTestRequest } as UpdateTestRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.testId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateTestRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.favorite = reader.bool(); + break; + case 7: + message.targetVersion = reader.string(); + break; + case 8: + message.imbalancePoint = longToNumber(reader.int64() as Long); + break; + case 9: + message.imbalanceTs = longToNumber(reader.int64() as Long); + break; + case 10: + message.imbalanceComment = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateTestRequest { + const message = { ...baseUpdateTestRequest } as UpdateTestRequest; + message.testId = + object.testId !== undefined && object.testId !== null + ? String(object.testId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? 
String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.favorite = + object.favorite !== undefined && object.favorite !== null + ? Boolean(object.favorite) + : false; + message.targetVersion = + object.targetVersion !== undefined && object.targetVersion !== null + ? String(object.targetVersion) + : ""; + message.imbalancePoint = + object.imbalancePoint !== undefined && object.imbalancePoint !== null + ? Number(object.imbalancePoint) + : 0; + message.imbalanceTs = + object.imbalanceTs !== undefined && object.imbalanceTs !== null + ? Number(object.imbalanceTs) + : 0; + message.imbalanceComment = + object.imbalanceComment !== undefined && object.imbalanceComment !== null + ? String(object.imbalanceComment) + : ""; + return message; + }, + + toJSON(message: UpdateTestRequest): unknown { + const obj: any = {}; + message.testId !== undefined && (obj.testId = message.testId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.favorite !== undefined && (obj.favorite = message.favorite); + message.targetVersion !== undefined && + (obj.targetVersion = message.targetVersion); + message.imbalancePoint !== undefined && + (obj.imbalancePoint = Math.round(message.imbalancePoint)); + message.imbalanceTs !== undefined && + (obj.imbalanceTs = Math.round(message.imbalanceTs)); + message.imbalanceComment !== undefined && + (obj.imbalanceComment = message.imbalanceComment); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateTestRequest { + const message = { ...baseUpdateTestRequest } as UpdateTestRequest; + message.testId = object.testId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.favorite = object.favorite ?? false; + message.targetVersion = object.targetVersion ?? ""; + message.imbalancePoint = object.imbalancePoint ?? 0; + message.imbalanceTs = object.imbalanceTs ?? 0; + message.imbalanceComment = object.imbalanceComment ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateTestRequest.$type, UpdateTestRequest); + +const baseUpdateTestRequest_LabelsEntry: object = { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateTestRequest_LabelsEntry = { + $type: + "yandex.cloud.loadtesting.agent.v1.UpdateTestRequest.LabelsEntry" as const, + + encode( + message: UpdateTestRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateTestRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateTestRequest_LabelsEntry, + } as UpdateTestRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateTestRequest_LabelsEntry { + const message = { + ...baseUpdateTestRequest_LabelsEntry, + } as UpdateTestRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateTestRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateTestRequest_LabelsEntry { + const message = { + ...baseUpdateTestRequest_LabelsEntry, + } as UpdateTestRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateTestRequest_LabelsEntry.$type, + UpdateTestRequest_LabelsEntry +); + +const baseUpdateTestMetadata: object = { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestMetadata", + testId: "", +}; + +export const UpdateTestMetadata = { + $type: "yandex.cloud.loadtesting.agent.v1.UpdateTestMetadata" as const, + + encode( + message: UpdateTestMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.testId !== "") { + writer.uint32(10).string(message.testId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateTestMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateTestMetadata } as UpdateTestMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.testId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateTestMetadata { + const message = { ...baseUpdateTestMetadata } as UpdateTestMetadata; + message.testId = + object.testId !== undefined && object.testId !== null + ? 
String(object.testId) + : ""; + return message; + }, + + toJSON(message: UpdateTestMetadata): unknown { + const obj: any = {}; + message.testId !== undefined && (obj.testId = message.testId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateTestMetadata { + const message = { ...baseUpdateTestMetadata } as UpdateTestMetadata; + message.testId = object.testId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateTestMetadata.$type, UpdateTestMetadata); + +export const TestServiceService = { + /** Returns test by test id. */ + get: { + path: "/yandex.cloud.loadtesting.agent.v1.TestService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetTestRequest) => + Buffer.from(GetTestRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetTestRequest.decode(value), + responseSerialize: (value: Test) => + Buffer.from(Test.encode(value).finish()), + responseDeserialize: (value: Buffer) => Test.decode(value), + }, + /** Creates test for the specified folder. */ + create: { + path: "/yandex.cloud.loadtesting.agent.v1.TestService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateTestRequest) => + Buffer.from(CreateTestRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateTestRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified test. 
*/ + update: { + path: "/yandex.cloud.loadtesting.agent.v1.TestService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateTestRequest) => + Buffer.from(UpdateTestRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateTestRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface TestServiceServer extends UntypedServiceImplementation { + /** Returns test by test id. */ + get: handleUnaryCall; + /** Creates test for the specified folder. */ + create: handleUnaryCall; + /** Updates the specified test. */ + update: handleUnaryCall; +} + +export interface TestServiceClient extends Client { + /** Returns test by test id. */ + get( + request: GetTestRequest, + callback: (error: ServiceError | null, response: Test) => void + ): ClientUnaryCall; + get( + request: GetTestRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Test) => void + ): ClientUnaryCall; + get( + request: GetTestRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Test) => void + ): ClientUnaryCall; + /** Creates test for the specified folder. */ + create( + request: CreateTestRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateTestRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateTestRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified test. 
*/ + update( + request: UpdateTestRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateTestRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateTestRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const TestServiceClient = makeGenericClientConstructor( + TestServiceService, + "yandex.cloud.loadtesting.agent.v1.TestService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TestServiceClient; + service: typeof TestServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/agent/v1/trail_service.ts b/src/generated/yandex/cloud/loadtesting/agent/v1/trail_service.ts new file mode 100644 index 00000000..c5241b8e --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/agent/v1/trail_service.ts @@ -0,0 +1,888 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.agent.v1"; + +export interface CreateTrailRequest { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailRequest"; + computeInstanceId: string; + data: Trail[]; + jobId: string; + agentInstanceId: string; +} + +export interface Trail { + $type: "yandex.cloud.loadtesting.agent.v1.Trail"; + overall: number; + caseId: string; + time: string; + reqps: number; + resps: number; + expect: number; + input: number; + output: number; + connectTime: number; + sendTime: number; + latency: number; + receiveTime: number; + threads: number; + q50: number; + q75: number; + q80: number; + q85: number; + q90: number; + q95: number; + q98: number; + q99: number; + q100: number; + httpCodes: Trail_Codes[]; + netCodes: Trail_Codes[]; + timeIntervals: Trail_Intervals[]; +} + +export interface Trail_Codes { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Codes"; + code: number; + count: number; +} + 
+export interface Trail_Intervals { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Intervals"; + to: number; + count: number; +} + +export interface CreateTrailResponse { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailResponse"; + trailId: string; + code: number; +} + +const baseCreateTrailRequest: object = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailRequest", + computeInstanceId: "", + jobId: "", + agentInstanceId: "", +}; + +export const CreateTrailRequest = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailRequest" as const, + + encode( + message: CreateTrailRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.computeInstanceId !== "") { + writer.uint32(10).string(message.computeInstanceId); + } + for (const v of message.data) { + Trail.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.jobId !== "") { + writer.uint32(26).string(message.jobId); + } + if (message.agentInstanceId !== "") { + writer.uint32(34).string(message.agentInstanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateTrailRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCreateTrailRequest } as CreateTrailRequest; + message.data = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.computeInstanceId = reader.string(); + break; + case 2: + message.data.push(Trail.decode(reader, reader.uint32())); + break; + case 3: + message.jobId = reader.string(); + break; + case 4: + message.agentInstanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateTrailRequest { + const message = { ...baseCreateTrailRequest } as CreateTrailRequest; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? String(object.computeInstanceId) + : ""; + message.data = (object.data ?? []).map((e: any) => Trail.fromJSON(e)); + message.jobId = + object.jobId !== undefined && object.jobId !== null + ? String(object.jobId) + : ""; + message.agentInstanceId = + object.agentInstanceId !== undefined && object.agentInstanceId !== null + ? String(object.agentInstanceId) + : ""; + return message; + }, + + toJSON(message: CreateTrailRequest): unknown { + const obj: any = {}; + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + if (message.data) { + obj.data = message.data.map((e) => (e ? Trail.toJSON(e) : undefined)); + } else { + obj.data = []; + } + message.jobId !== undefined && (obj.jobId = message.jobId); + message.agentInstanceId !== undefined && + (obj.agentInstanceId = message.agentInstanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateTrailRequest { + const message = { ...baseCreateTrailRequest } as CreateTrailRequest; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.data = object.data?.map((e) => Trail.fromPartial(e)) || []; + message.jobId = object.jobId ?? 
""; + message.agentInstanceId = object.agentInstanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateTrailRequest.$type, CreateTrailRequest); + +const baseTrail: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail", + overall: 0, + caseId: "", + time: "", + reqps: 0, + resps: 0, + expect: 0, + input: 0, + output: 0, + connectTime: 0, + sendTime: 0, + latency: 0, + receiveTime: 0, + threads: 0, + q50: 0, + q75: 0, + q80: 0, + q85: 0, + q90: 0, + q95: 0, + q98: 0, + q99: 0, + q100: 0, +}; + +export const Trail = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail" as const, + + encode(message: Trail, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.overall !== 0) { + writer.uint32(8).int64(message.overall); + } + if (message.caseId !== "") { + writer.uint32(18).string(message.caseId); + } + if (message.time !== "") { + writer.uint32(26).string(message.time); + } + if (message.reqps !== 0) { + writer.uint32(32).int64(message.reqps); + } + if (message.resps !== 0) { + writer.uint32(40).int64(message.resps); + } + if (message.expect !== 0) { + writer.uint32(49).double(message.expect); + } + if (message.input !== 0) { + writer.uint32(56).int64(message.input); + } + if (message.output !== 0) { + writer.uint32(64).int64(message.output); + } + if (message.connectTime !== 0) { + writer.uint32(73).double(message.connectTime); + } + if (message.sendTime !== 0) { + writer.uint32(81).double(message.sendTime); + } + if (message.latency !== 0) { + writer.uint32(89).double(message.latency); + } + if (message.receiveTime !== 0) { + writer.uint32(97).double(message.receiveTime); + } + if (message.threads !== 0) { + writer.uint32(104).int64(message.threads); + } + if (message.q50 !== 0) { + writer.uint32(113).double(message.q50); + } + if (message.q75 !== 0) { + writer.uint32(121).double(message.q75); + } + if (message.q80 !== 0) { + writer.uint32(129).double(message.q80); + } + if (message.q85 !== 0) { + 
writer.uint32(137).double(message.q85); + } + if (message.q90 !== 0) { + writer.uint32(145).double(message.q90); + } + if (message.q95 !== 0) { + writer.uint32(153).double(message.q95); + } + if (message.q98 !== 0) { + writer.uint32(161).double(message.q98); + } + if (message.q99 !== 0) { + writer.uint32(169).double(message.q99); + } + if (message.q100 !== 0) { + writer.uint32(177).double(message.q100); + } + for (const v of message.httpCodes) { + Trail_Codes.encode(v!, writer.uint32(186).fork()).ldelim(); + } + for (const v of message.netCodes) { + Trail_Codes.encode(v!, writer.uint32(194).fork()).ldelim(); + } + for (const v of message.timeIntervals) { + Trail_Intervals.encode(v!, writer.uint32(202).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Trail { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTrail } as Trail; + message.httpCodes = []; + message.netCodes = []; + message.timeIntervals = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.overall = longToNumber(reader.int64() as Long); + break; + case 2: + message.caseId = reader.string(); + break; + case 3: + message.time = reader.string(); + break; + case 4: + message.reqps = longToNumber(reader.int64() as Long); + break; + case 5: + message.resps = longToNumber(reader.int64() as Long); + break; + case 6: + message.expect = reader.double(); + break; + case 7: + message.input = longToNumber(reader.int64() as Long); + break; + case 8: + message.output = longToNumber(reader.int64() as Long); + break; + case 9: + message.connectTime = reader.double(); + break; + case 10: + message.sendTime = reader.double(); + break; + case 11: + message.latency = reader.double(); + break; + case 12: + message.receiveTime = reader.double(); + break; + case 13: + message.threads = 
longToNumber(reader.int64() as Long); + break; + case 14: + message.q50 = reader.double(); + break; + case 15: + message.q75 = reader.double(); + break; + case 16: + message.q80 = reader.double(); + break; + case 17: + message.q85 = reader.double(); + break; + case 18: + message.q90 = reader.double(); + break; + case 19: + message.q95 = reader.double(); + break; + case 20: + message.q98 = reader.double(); + break; + case 21: + message.q99 = reader.double(); + break; + case 22: + message.q100 = reader.double(); + break; + case 23: + message.httpCodes.push(Trail_Codes.decode(reader, reader.uint32())); + break; + case 24: + message.netCodes.push(Trail_Codes.decode(reader, reader.uint32())); + break; + case 25: + message.timeIntervals.push( + Trail_Intervals.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Trail { + const message = { ...baseTrail } as Trail; + message.overall = + object.overall !== undefined && object.overall !== null + ? Number(object.overall) + : 0; + message.caseId = + object.caseId !== undefined && object.caseId !== null + ? String(object.caseId) + : ""; + message.time = + object.time !== undefined && object.time !== null + ? String(object.time) + : ""; + message.reqps = + object.reqps !== undefined && object.reqps !== null + ? Number(object.reqps) + : 0; + message.resps = + object.resps !== undefined && object.resps !== null + ? Number(object.resps) + : 0; + message.expect = + object.expect !== undefined && object.expect !== null + ? Number(object.expect) + : 0; + message.input = + object.input !== undefined && object.input !== null + ? Number(object.input) + : 0; + message.output = + object.output !== undefined && object.output !== null + ? Number(object.output) + : 0; + message.connectTime = + object.connectTime !== undefined && object.connectTime !== null + ? 
Number(object.connectTime) + : 0; + message.sendTime = + object.sendTime !== undefined && object.sendTime !== null + ? Number(object.sendTime) + : 0; + message.latency = + object.latency !== undefined && object.latency !== null + ? Number(object.latency) + : 0; + message.receiveTime = + object.receiveTime !== undefined && object.receiveTime !== null + ? Number(object.receiveTime) + : 0; + message.threads = + object.threads !== undefined && object.threads !== null + ? Number(object.threads) + : 0; + message.q50 = + object.q50 !== undefined && object.q50 !== null ? Number(object.q50) : 0; + message.q75 = + object.q75 !== undefined && object.q75 !== null ? Number(object.q75) : 0; + message.q80 = + object.q80 !== undefined && object.q80 !== null ? Number(object.q80) : 0; + message.q85 = + object.q85 !== undefined && object.q85 !== null ? Number(object.q85) : 0; + message.q90 = + object.q90 !== undefined && object.q90 !== null ? Number(object.q90) : 0; + message.q95 = + object.q95 !== undefined && object.q95 !== null ? Number(object.q95) : 0; + message.q98 = + object.q98 !== undefined && object.q98 !== null ? Number(object.q98) : 0; + message.q99 = + object.q99 !== undefined && object.q99 !== null ? Number(object.q99) : 0; + message.q100 = + object.q100 !== undefined && object.q100 !== null + ? Number(object.q100) + : 0; + message.httpCodes = (object.httpCodes ?? []).map((e: any) => + Trail_Codes.fromJSON(e) + ); + message.netCodes = (object.netCodes ?? []).map((e: any) => + Trail_Codes.fromJSON(e) + ); + message.timeIntervals = (object.timeIntervals ?? 
[]).map((e: any) => + Trail_Intervals.fromJSON(e) + ); + return message; + }, + + toJSON(message: Trail): unknown { + const obj: any = {}; + message.overall !== undefined && + (obj.overall = Math.round(message.overall)); + message.caseId !== undefined && (obj.caseId = message.caseId); + message.time !== undefined && (obj.time = message.time); + message.reqps !== undefined && (obj.reqps = Math.round(message.reqps)); + message.resps !== undefined && (obj.resps = Math.round(message.resps)); + message.expect !== undefined && (obj.expect = message.expect); + message.input !== undefined && (obj.input = Math.round(message.input)); + message.output !== undefined && (obj.output = Math.round(message.output)); + message.connectTime !== undefined && + (obj.connectTime = message.connectTime); + message.sendTime !== undefined && (obj.sendTime = message.sendTime); + message.latency !== undefined && (obj.latency = message.latency); + message.receiveTime !== undefined && + (obj.receiveTime = message.receiveTime); + message.threads !== undefined && + (obj.threads = Math.round(message.threads)); + message.q50 !== undefined && (obj.q50 = message.q50); + message.q75 !== undefined && (obj.q75 = message.q75); + message.q80 !== undefined && (obj.q80 = message.q80); + message.q85 !== undefined && (obj.q85 = message.q85); + message.q90 !== undefined && (obj.q90 = message.q90); + message.q95 !== undefined && (obj.q95 = message.q95); + message.q98 !== undefined && (obj.q98 = message.q98); + message.q99 !== undefined && (obj.q99 = message.q99); + message.q100 !== undefined && (obj.q100 = message.q100); + if (message.httpCodes) { + obj.httpCodes = message.httpCodes.map((e) => + e ? Trail_Codes.toJSON(e) : undefined + ); + } else { + obj.httpCodes = []; + } + if (message.netCodes) { + obj.netCodes = message.netCodes.map((e) => + e ? 
Trail_Codes.toJSON(e) : undefined + ); + } else { + obj.netCodes = []; + } + if (message.timeIntervals) { + obj.timeIntervals = message.timeIntervals.map((e) => + e ? Trail_Intervals.toJSON(e) : undefined + ); + } else { + obj.timeIntervals = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Trail { + const message = { ...baseTrail } as Trail; + message.overall = object.overall ?? 0; + message.caseId = object.caseId ?? ""; + message.time = object.time ?? ""; + message.reqps = object.reqps ?? 0; + message.resps = object.resps ?? 0; + message.expect = object.expect ?? 0; + message.input = object.input ?? 0; + message.output = object.output ?? 0; + message.connectTime = object.connectTime ?? 0; + message.sendTime = object.sendTime ?? 0; + message.latency = object.latency ?? 0; + message.receiveTime = object.receiveTime ?? 0; + message.threads = object.threads ?? 0; + message.q50 = object.q50 ?? 0; + message.q75 = object.q75 ?? 0; + message.q80 = object.q80 ?? 0; + message.q85 = object.q85 ?? 0; + message.q90 = object.q90 ?? 0; + message.q95 = object.q95 ?? 0; + message.q98 = object.q98 ?? 0; + message.q99 = object.q99 ?? 0; + message.q100 = object.q100 ?? 
0; + message.httpCodes = + object.httpCodes?.map((e) => Trail_Codes.fromPartial(e)) || []; + message.netCodes = + object.netCodes?.map((e) => Trail_Codes.fromPartial(e)) || []; + message.timeIntervals = + object.timeIntervals?.map((e) => Trail_Intervals.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Trail.$type, Trail); + +const baseTrail_Codes: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Codes", + code: 0, + count: 0, +}; + +export const Trail_Codes = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Codes" as const, + + encode( + message: Trail_Codes, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.code !== 0) { + writer.uint32(8).int64(message.code); + } + if (message.count !== 0) { + writer.uint32(16).int64(message.count); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Trail_Codes { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTrail_Codes } as Trail_Codes; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.code = longToNumber(reader.int64() as Long); + break; + case 2: + message.count = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Trail_Codes { + const message = { ...baseTrail_Codes } as Trail_Codes; + message.code = + object.code !== undefined && object.code !== null + ? Number(object.code) + : 0; + message.count = + object.count !== undefined && object.count !== null + ? 
Number(object.count) + : 0; + return message; + }, + + toJSON(message: Trail_Codes): unknown { + const obj: any = {}; + message.code !== undefined && (obj.code = Math.round(message.code)); + message.count !== undefined && (obj.count = Math.round(message.count)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Trail_Codes { + const message = { ...baseTrail_Codes } as Trail_Codes; + message.code = object.code ?? 0; + message.count = object.count ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Trail_Codes.$type, Trail_Codes); + +const baseTrail_Intervals: object = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Intervals", + to: 0, + count: 0, +}; + +export const Trail_Intervals = { + $type: "yandex.cloud.loadtesting.agent.v1.Trail.Intervals" as const, + + encode( + message: Trail_Intervals, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.to !== 0) { + writer.uint32(9).double(message.to); + } + if (message.count !== 0) { + writer.uint32(16).int64(message.count); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Trail_Intervals { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTrail_Intervals } as Trail_Intervals; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.to = reader.double(); + break; + case 2: + message.count = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Trail_Intervals { + const message = { ...baseTrail_Intervals } as Trail_Intervals; + message.to = + object.to !== undefined && object.to !== null ? Number(object.to) : 0; + message.count = + object.count !== undefined && object.count !== null + ? 
Number(object.count) + : 0; + return message; + }, + + toJSON(message: Trail_Intervals): unknown { + const obj: any = {}; + message.to !== undefined && (obj.to = message.to); + message.count !== undefined && (obj.count = Math.round(message.count)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Trail_Intervals { + const message = { ...baseTrail_Intervals } as Trail_Intervals; + message.to = object.to ?? 0; + message.count = object.count ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Trail_Intervals.$type, Trail_Intervals); + +const baseCreateTrailResponse: object = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailResponse", + trailId: "", + code: 0, +}; + +export const CreateTrailResponse = { + $type: "yandex.cloud.loadtesting.agent.v1.CreateTrailResponse" as const, + + encode( + message: CreateTrailResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.trailId !== "") { + writer.uint32(10).string(message.trailId); + } + if (message.code !== 0) { + writer.uint32(16).int64(message.code); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateTrailResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateTrailResponse } as CreateTrailResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.trailId = reader.string(); + break; + case 2: + message.code = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateTrailResponse { + const message = { ...baseCreateTrailResponse } as CreateTrailResponse; + message.trailId = + object.trailId !== undefined && object.trailId !== null + ? String(object.trailId) + : ""; + message.code = + object.code !== undefined && object.code !== null + ? 
Number(object.code) + : 0; + return message; + }, + + toJSON(message: CreateTrailResponse): unknown { + const obj: any = {}; + message.trailId !== undefined && (obj.trailId = message.trailId); + message.code !== undefined && (obj.code = Math.round(message.code)); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateTrailResponse { + const message = { ...baseCreateTrailResponse } as CreateTrailResponse; + message.trailId = object.trailId ?? ""; + message.code = object.code ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(CreateTrailResponse.$type, CreateTrailResponse); + +export const TrailServiceService = { + /** Creates trail for specified job. */ + create: { + path: "/yandex.cloud.loadtesting.agent.v1.TrailService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateTrailRequest) => + Buffer.from(CreateTrailRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateTrailRequest.decode(value), + responseSerialize: (value: CreateTrailResponse) => + Buffer.from(CreateTrailResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => CreateTrailResponse.decode(value), + }, +} as const; + +export interface TrailServiceServer extends UntypedServiceImplementation { + /** Creates trail for specified job. */ + create: handleUnaryCall; +} + +export interface TrailServiceClient extends Client { + /** Creates trail for specified job. 
*/ + create( + request: CreateTrailRequest, + callback: ( + error: ServiceError | null, + response: CreateTrailResponse + ) => void + ): ClientUnaryCall; + create( + request: CreateTrailRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: CreateTrailResponse + ) => void + ): ClientUnaryCall; + create( + request: CreateTrailRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: CreateTrailResponse + ) => void + ): ClientUnaryCall; +} + +export const TrailServiceClient = makeGenericClientConstructor( + TrailServiceService, + "yandex.cloud.loadtesting.agent.v1.TrailService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): TrailServiceClient; + service: typeof TrailServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/api/v1/agent/agent.ts b/src/generated/yandex/cloud/loadtesting/api/v1/agent/agent.ts new file mode 100644 index 00000000..c219a8e4 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/api/v1/agent/agent.ts @@ -0,0 +1,207 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Status, + statusFromJSON, + statusToJSON, +} from "../../../../../../yandex/cloud/loadtesting/api/v1/agent/status"; + +export const protobufPackage = "yandex.cloud.loadtesting.api.v1.agent"; + +export interface Agent { + $type: "yandex.cloud.loadtesting.api.v1.agent.Agent"; + id: string; + folderId: string; + name: string; + description: string; + computeInstanceId: string; + status: Status; + errors: string[]; + currentJobId: string; +} + +const baseAgent: object = { + $type: "yandex.cloud.loadtesting.api.v1.agent.Agent", + id: "", + folderId: "", + name: "", + description: "", + computeInstanceId: "", + status: 0, + errors: "", + currentJobId: "", +}; + +export const Agent = { + $type: "yandex.cloud.loadtesting.api.v1.agent.Agent" as const, + + encode(message: Agent, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + if 
(message.computeInstanceId !== "") { + writer.uint32(42).string(message.computeInstanceId); + } + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + for (const v of message.errors) { + writer.uint32(66).string(v!); + } + if (message.currentJobId !== "") { + writer.uint32(74).string(message.currentJobId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Agent { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAgent } as Agent; + message.errors = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.computeInstanceId = reader.string(); + break; + case 7: + message.status = reader.int32() as any; + break; + case 8: + message.errors.push(reader.string()); + break; + case 9: + message.currentJobId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Agent { + const message = { ...baseAgent } as Agent; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.computeInstanceId = + object.computeInstanceId !== undefined && + object.computeInstanceId !== null + ? 
String(object.computeInstanceId) + : ""; + message.status = + object.status !== undefined && object.status !== null + ? statusFromJSON(object.status) + : 0; + message.errors = (object.errors ?? []).map((e: any) => String(e)); + message.currentJobId = + object.currentJobId !== undefined && object.currentJobId !== null + ? String(object.currentJobId) + : ""; + return message; + }, + + toJSON(message: Agent): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.computeInstanceId !== undefined && + (obj.computeInstanceId = message.computeInstanceId); + message.status !== undefined && (obj.status = statusToJSON(message.status)); + if (message.errors) { + obj.errors = message.errors.map((e) => e); + } else { + obj.errors = []; + } + message.currentJobId !== undefined && + (obj.currentJobId = message.currentJobId); + return obj; + }, + + fromPartial, I>>(object: I): Agent { + const message = { ...baseAgent } as Agent; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.computeInstanceId = object.computeInstanceId ?? ""; + message.status = object.status ?? 0; + message.errors = object.errors?.map((e) => e) || []; + message.currentJobId = object.currentJobId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Agent.$type, Agent); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? 
keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/api/v1/agent/create_compute_instance.ts b/src/generated/yandex/cloud/loadtesting/api/v1/agent/create_compute_instance.ts new file mode 100644 index 00000000..49fb8910 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/api/v1/agent/create_compute_instance.ts @@ -0,0 +1,498 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + ResourcesSpec, + AttachedDiskSpec, + NetworkInterfaceSpec, +} from "../../../../../../yandex/cloud/compute/v1/instance_service"; + +export const protobufPackage = "yandex.cloud.loadtesting.api.v1.agent"; + +export interface CreateComputeInstance { + $type: "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance"; + /** Resource labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** + * ID of the availability zone where the instance resides. + * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request + */ + zoneId: string; + /** + * Computing resources of the instance, such as the amount of memory and number of cores. + * To get a list of available values, see [Levels of core performance](/docs/compute/concepts/performance-levels). + */ + resourcesSpec?: ResourcesSpec; + /** + * The metadata `key:value` pairs that will be assigned to this instance. This includes custom metadata and predefined keys. + * The total size of all keys and values must be less than 512 KB. + * + * Values are free-form strings, and only have meaning as interpreted by the programs which configure the instance. + * The values must be 256 KB or less. 
+ * + * For example, you may use the metadata in order to provide your public SSH key to the instance. + * For more information, see [Metadata](/docs/compute/concepts/vm-metadata). + */ + metadata: { [key: string]: string }; + /** Boot disk to attach to the instance. */ + bootDiskSpec?: AttachedDiskSpec; + /** + * Network configuration for the instance. Specifies how the network interface is configured + * to interact with other services on the internal network and on the internet. + * Currently only one network interface is supported per instance. + */ + networkInterfaceSpecs: NetworkInterfaceSpec[]; + /** + * ID of the service account to use for [authentication inside the instance](/docs/compute/operations/vm-connect/auth-inside-vm). + * To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. + */ + serviceAccountId: string; +} + +export interface CreateComputeInstance_LabelsEntry { + $type: "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateComputeInstance_MetadataEntry { + $type: "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.MetadataEntry"; + key: string; + value: string; +} + +const baseCreateComputeInstance: object = { + $type: "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance", + zoneId: "", + serviceAccountId: "", +}; + +export const CreateComputeInstance = { + $type: "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance" as const, + + encode( + message: CreateComputeInstance, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + Object.entries(message.labels).forEach(([key, value]) => { + CreateComputeInstance_LabelsEntry.encode( + { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.zoneId !== "") { + writer.uint32(42).string(message.zoneId); + } + if 
(message.resourcesSpec !== undefined) { + ResourcesSpec.encode( + message.resourcesSpec, + writer.uint32(58).fork() + ).ldelim(); + } + Object.entries(message.metadata).forEach(([key, value]) => { + CreateComputeInstance_MetadataEntry.encode( + { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.MetadataEntry", + key: key as any, + value, + }, + writer.uint32(66).fork() + ).ldelim(); + }); + if (message.bootDiskSpec !== undefined) { + AttachedDiskSpec.encode( + message.bootDiskSpec, + writer.uint32(74).fork() + ).ldelim(); + } + for (const v of message.networkInterfaceSpecs) { + NetworkInterfaceSpec.encode(v!, writer.uint32(90).fork()).ldelim(); + } + if (message.serviceAccountId !== "") { + writer.uint32(114).string(message.serviceAccountId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateComputeInstance { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCreateComputeInstance } as CreateComputeInstance; + message.labels = {}; + message.metadata = {}; + message.networkInterfaceSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 4: + const entry4 = CreateComputeInstance_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.zoneId = reader.string(); + break; + case 7: + message.resourcesSpec = ResourcesSpec.decode(reader, reader.uint32()); + break; + case 8: + const entry8 = CreateComputeInstance_MetadataEntry.decode( + reader, + reader.uint32() + ); + if (entry8.value !== undefined) { + message.metadata[entry8.key] = entry8.value; + } + break; + case 9: + message.bootDiskSpec = AttachedDiskSpec.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.networkInterfaceSpecs.push( + NetworkInterfaceSpec.decode(reader, reader.uint32()) + ); + break; + case 14: + message.serviceAccountId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateComputeInstance { + const message = { ...baseCreateComputeInstance } as CreateComputeInstance; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.resourcesSpec = + object.resourcesSpec !== undefined && object.resourcesSpec !== null + ? ResourcesSpec.fromJSON(object.resourcesSpec) + : undefined; + message.metadata = Object.entries(object.metadata ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.bootDiskSpec = + object.bootDiskSpec !== undefined && object.bootDiskSpec !== null + ? AttachedDiskSpec.fromJSON(object.bootDiskSpec) + : undefined; + message.networkInterfaceSpecs = (object.networkInterfaceSpecs ?? []).map( + (e: any) => NetworkInterfaceSpec.fromJSON(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + return message; + }, + + toJSON(message: CreateComputeInstance): unknown { + const obj: any = {}; + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.resourcesSpec !== undefined && + (obj.resourcesSpec = message.resourcesSpec + ? ResourcesSpec.toJSON(message.resourcesSpec) + : undefined); + obj.metadata = {}; + if (message.metadata) { + Object.entries(message.metadata).forEach(([k, v]) => { + obj.metadata[k] = v; + }); + } + message.bootDiskSpec !== undefined && + (obj.bootDiskSpec = message.bootDiskSpec + ? AttachedDiskSpec.toJSON(message.bootDiskSpec) + : undefined); + if (message.networkInterfaceSpecs) { + obj.networkInterfaceSpecs = message.networkInterfaceSpecs.map((e) => + e ? NetworkInterfaceSpec.toJSON(e) : undefined + ); + } else { + obj.networkInterfaceSpecs = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateComputeInstance { + const message = { ...baseCreateComputeInstance } as CreateComputeInstance; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.zoneId = object.zoneId ?? 
""; + message.resourcesSpec = + object.resourcesSpec !== undefined && object.resourcesSpec !== null + ? ResourcesSpec.fromPartial(object.resourcesSpec) + : undefined; + message.metadata = Object.entries(object.metadata ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.bootDiskSpec = + object.bootDiskSpec !== undefined && object.bootDiskSpec !== null + ? AttachedDiskSpec.fromPartial(object.bootDiskSpec) + : undefined; + message.networkInterfaceSpecs = + object.networkInterfaceSpecs?.map((e) => + NetworkInterfaceSpec.fromPartial(e) + ) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateComputeInstance.$type, CreateComputeInstance); + +const baseCreateComputeInstance_LabelsEntry: object = { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.LabelsEntry", + key: "", + value: "", +}; + +export const CreateComputeInstance_LabelsEntry = { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.LabelsEntry" as const, + + encode( + message: CreateComputeInstance_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateComputeInstance_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateComputeInstance_LabelsEntry, + } as CreateComputeInstance_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateComputeInstance_LabelsEntry { + const message = { + ...baseCreateComputeInstance_LabelsEntry, + } as CreateComputeInstance_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateComputeInstance_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateComputeInstance_LabelsEntry { + const message = { + ...baseCreateComputeInstance_LabelsEntry, + } as CreateComputeInstance_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateComputeInstance_LabelsEntry.$type, + CreateComputeInstance_LabelsEntry +); + +const baseCreateComputeInstance_MetadataEntry: object = { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.MetadataEntry", + key: "", + value: "", +}; + +export const CreateComputeInstance_MetadataEntry = { + $type: + "yandex.cloud.loadtesting.api.v1.agent.CreateComputeInstance.MetadataEntry" as const, + + encode( + message: CreateComputeInstance_MetadataEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateComputeInstance_MetadataEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateComputeInstance_MetadataEntry, + } as CreateComputeInstance_MetadataEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateComputeInstance_MetadataEntry { + const message = { + ...baseCreateComputeInstance_MetadataEntry, + } as CreateComputeInstance_MetadataEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateComputeInstance_MetadataEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateComputeInstance_MetadataEntry { + const message = { + ...baseCreateComputeInstance_MetadataEntry, + } as CreateComputeInstance_MetadataEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateComputeInstance_MetadataEntry.$type, + CreateComputeInstance_MetadataEntry +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/api/v1/agent/status.ts b/src/generated/yandex/cloud/loadtesting/api/v1/agent/status.ts new file mode 100644 index 00000000..de42d092 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/api/v1/agent/status.ts @@ -0,0 +1,132 @@ +/* eslint-disable */ +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.loadtesting.api.v1.agent"; + +export enum Status { + STATUS_UNSPECIFIED = 0, + PREPARING_TEST = 1, + READY_FOR_TEST = 2, + TESTING = 3, + TANK_FAILED = 4, + PROVISIONING = 5, + STOPPING = 6, + STOPPED = 7, + STARTING = 8, + RESTARTING = 9, + UPDATING = 10, + ERROR = 11, + CRASHED = 12, + DELETING = 13, + INITIALIZING_CONNECTION = 15, + LOST_CONNECTION_WITH_AGENT = 16, + UPLOADING_ARTIFACTS = 17, + UNRECOGNIZED = -1, +} + +export function statusFromJSON(object: any): Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return Status.STATUS_UNSPECIFIED; + case 1: + case "PREPARING_TEST": + return Status.PREPARING_TEST; + case 2: + case "READY_FOR_TEST": + return Status.READY_FOR_TEST; + case 3: + case "TESTING": + return Status.TESTING; + case 4: + case "TANK_FAILED": + return Status.TANK_FAILED; + case 5: + case "PROVISIONING": + return Status.PROVISIONING; + case 6: + case "STOPPING": + return Status.STOPPING; + case 7: + case "STOPPED": + return Status.STOPPED; + case 8: + case "STARTING": + return Status.STARTING; + case 9: + case "RESTARTING": + return Status.RESTARTING; + case 10: + case "UPDATING": + return Status.UPDATING; + case 11: + case "ERROR": + return Status.ERROR; + case 12: + case "CRASHED": + return Status.CRASHED; + case 13: + case "DELETING": + return Status.DELETING; + case 15: + case "INITIALIZING_CONNECTION": + return Status.INITIALIZING_CONNECTION; + 
case 16: + case "LOST_CONNECTION_WITH_AGENT": + return Status.LOST_CONNECTION_WITH_AGENT; + case 17: + case "UPLOADING_ARTIFACTS": + return Status.UPLOADING_ARTIFACTS; + case -1: + case "UNRECOGNIZED": + default: + return Status.UNRECOGNIZED; + } +} + +export function statusToJSON(object: Status): string { + switch (object) { + case Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case Status.PREPARING_TEST: + return "PREPARING_TEST"; + case Status.READY_FOR_TEST: + return "READY_FOR_TEST"; + case Status.TESTING: + return "TESTING"; + case Status.TANK_FAILED: + return "TANK_FAILED"; + case Status.PROVISIONING: + return "PROVISIONING"; + case Status.STOPPING: + return "STOPPING"; + case Status.STOPPED: + return "STOPPED"; + case Status.STARTING: + return "STARTING"; + case Status.RESTARTING: + return "RESTARTING"; + case Status.UPDATING: + return "UPDATING"; + case Status.ERROR: + return "ERROR"; + case Status.CRASHED: + return "CRASHED"; + case Status.DELETING: + return "DELETING"; + case Status.INITIALIZING_CONNECTION: + return "INITIALIZING_CONNECTION"; + case Status.LOST_CONNECTION_WITH_AGENT: + return "LOST_CONNECTION_WITH_AGENT"; + case Status.UPLOADING_ARTIFACTS: + return "UPLOADING_ARTIFACTS"; + default: + return "UNKNOWN"; + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/api/v1/agent_service.ts b/src/generated/yandex/cloud/loadtesting/api/v1/agent_service.ts new file mode 100644 index 00000000..256c9886 --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/api/v1/agent_service.ts @@ -0,0 +1,799 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import 
_m0 from "protobufjs/minimal"; +import { CreateComputeInstance } from "../../../../../yandex/cloud/loadtesting/api/v1/agent/create_compute_instance"; +import { Agent } from "../../../../../yandex/cloud/loadtesting/api/v1/agent/agent"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.loadtesting.api.v1"; + +export interface CreateAgentRequest { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentRequest"; + folderId: string; + name: string; + description: string; + computeInstanceParams?: CreateComputeInstance; + agentVersion: string; +} + +export interface CreateAgentMetadata { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentMetadata"; + agentId: string; +} + +export interface GetAgentRequest { + $type: "yandex.cloud.loadtesting.api.v1.GetAgentRequest"; + agentId: string; +} + +export interface DeleteAgentRequest { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentRequest"; + agentId: string; +} + +export interface DeleteAgentMetadata { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentMetadata"; + agentId: string; +} + +export interface ListAgentsRequest { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsRequest"; + folderId: string; + pageSize: number; + pageToken: string; + filter: string; +} + +export interface ListAgentsResponse { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsResponse"; + agents: Agent[]; + nextPageToken: string; +} + +const baseCreateAgentRequest: object = { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentRequest", + folderId: "", + name: "", + description: "", + agentVersion: "", +}; + +export const CreateAgentRequest = { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentRequest" as const, + + encode( + message: CreateAgentRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + 
writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + if (message.computeInstanceParams !== undefined) { + CreateComputeInstance.encode( + message.computeInstanceParams, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.agentVersion !== "") { + writer.uint32(42).string(message.agentVersion); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateAgentRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateAgentRequest } as CreateAgentRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + message.computeInstanceParams = CreateComputeInstance.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.agentVersion = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAgentRequest { + const message = { ...baseCreateAgentRequest } as CreateAgentRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.computeInstanceParams = + object.computeInstanceParams !== undefined && + object.computeInstanceParams !== null + ? CreateComputeInstance.fromJSON(object.computeInstanceParams) + : undefined; + message.agentVersion = + object.agentVersion !== undefined && object.agentVersion !== null + ? 
String(object.agentVersion) + : ""; + return message; + }, + + toJSON(message: CreateAgentRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.computeInstanceParams !== undefined && + (obj.computeInstanceParams = message.computeInstanceParams + ? CreateComputeInstance.toJSON(message.computeInstanceParams) + : undefined); + message.agentVersion !== undefined && + (obj.agentVersion = message.agentVersion); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateAgentRequest { + const message = { ...baseCreateAgentRequest } as CreateAgentRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.computeInstanceParams = + object.computeInstanceParams !== undefined && + object.computeInstanceParams !== null + ? CreateComputeInstance.fromPartial(object.computeInstanceParams) + : undefined; + message.agentVersion = object.agentVersion ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateAgentRequest.$type, CreateAgentRequest); + +const baseCreateAgentMetadata: object = { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentMetadata", + agentId: "", +}; + +export const CreateAgentMetadata = { + $type: "yandex.cloud.loadtesting.api.v1.CreateAgentMetadata" as const, + + encode( + message: CreateAgentMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentId !== "") { + writer.uint32(10).string(message.agentId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateAgentMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCreateAgentMetadata } as CreateAgentMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateAgentMetadata { + const message = { ...baseCreateAgentMetadata } as CreateAgentMetadata; + message.agentId = + object.agentId !== undefined && object.agentId !== null + ? String(object.agentId) + : ""; + return message; + }, + + toJSON(message: CreateAgentMetadata): unknown { + const obj: any = {}; + message.agentId !== undefined && (obj.agentId = message.agentId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateAgentMetadata { + const message = { ...baseCreateAgentMetadata } as CreateAgentMetadata; + message.agentId = object.agentId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateAgentMetadata.$type, CreateAgentMetadata); + +const baseGetAgentRequest: object = { + $type: "yandex.cloud.loadtesting.api.v1.GetAgentRequest", + agentId: "", +}; + +export const GetAgentRequest = { + $type: "yandex.cloud.loadtesting.api.v1.GetAgentRequest" as const, + + encode( + message: GetAgentRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentId !== "") { + writer.uint32(18).string(message.agentId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetAgentRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetAgentRequest } as GetAgentRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.agentId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetAgentRequest { + const message = { ...baseGetAgentRequest } as GetAgentRequest; + message.agentId = + object.agentId !== undefined && object.agentId !== null + ? String(object.agentId) + : ""; + return message; + }, + + toJSON(message: GetAgentRequest): unknown { + const obj: any = {}; + message.agentId !== undefined && (obj.agentId = message.agentId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetAgentRequest { + const message = { ...baseGetAgentRequest } as GetAgentRequest; + message.agentId = object.agentId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetAgentRequest.$type, GetAgentRequest); + +const baseDeleteAgentRequest: object = { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentRequest", + agentId: "", +}; + +export const DeleteAgentRequest = { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentRequest" as const, + + encode( + message: DeleteAgentRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentId !== "") { + writer.uint32(10).string(message.agentId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteAgentRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteAgentRequest } as DeleteAgentRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAgentRequest { + const message = { ...baseDeleteAgentRequest } as DeleteAgentRequest; + message.agentId = + object.agentId !== undefined && object.agentId !== null + ? String(object.agentId) + : ""; + return message; + }, + + toJSON(message: DeleteAgentRequest): unknown { + const obj: any = {}; + message.agentId !== undefined && (obj.agentId = message.agentId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteAgentRequest { + const message = { ...baseDeleteAgentRequest } as DeleteAgentRequest; + message.agentId = object.agentId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteAgentRequest.$type, DeleteAgentRequest); + +const baseDeleteAgentMetadata: object = { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentMetadata", + agentId: "", +}; + +export const DeleteAgentMetadata = { + $type: "yandex.cloud.loadtesting.api.v1.DeleteAgentMetadata" as const, + + encode( + message: DeleteAgentMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.agentId !== "") { + writer.uint32(10).string(message.agentId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteAgentMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteAgentMetadata } as DeleteAgentMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agentId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteAgentMetadata { + const message = { ...baseDeleteAgentMetadata } as DeleteAgentMetadata; + message.agentId = + object.agentId !== undefined && object.agentId !== null + ? String(object.agentId) + : ""; + return message; + }, + + toJSON(message: DeleteAgentMetadata): unknown { + const obj: any = {}; + message.agentId !== undefined && (obj.agentId = message.agentId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteAgentMetadata { + const message = { ...baseDeleteAgentMetadata } as DeleteAgentMetadata; + message.agentId = object.agentId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteAgentMetadata.$type, DeleteAgentMetadata); + +const baseListAgentsRequest: object = { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListAgentsRequest = { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsRequest" as const, + + encode( + message: ListAgentsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListAgentsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListAgentsRequest } as ListAgentsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAgentsRequest { + const message = { ...baseListAgentsRequest } as ListAgentsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListAgentsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListAgentsRequest { + const message = { ...baseListAgentsRequest } as ListAgentsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListAgentsRequest.$type, ListAgentsRequest); + +const baseListAgentsResponse: object = { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsResponse", + nextPageToken: "", +}; + +export const ListAgentsResponse = { + $type: "yandex.cloud.loadtesting.api.v1.ListAgentsResponse" as const, + + encode( + message: ListAgentsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.agents) { + Agent.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListAgentsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListAgentsResponse } as ListAgentsResponse; + message.agents = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.agents.push(Agent.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListAgentsResponse { + const message = { ...baseListAgentsResponse } as ListAgentsResponse; + message.agents = (object.agents ?? []).map((e: any) => Agent.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListAgentsResponse): unknown { + const obj: any = {}; + if (message.agents) { + obj.agents = message.agents.map((e) => (e ? 
Agent.toJSON(e) : undefined)); + } else { + obj.agents = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListAgentsResponse { + const message = { ...baseListAgentsResponse } as ListAgentsResponse; + message.agents = object.agents?.map((e) => Agent.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListAgentsResponse.$type, ListAgentsResponse); + +export const AgentServiceService = { + create: { + path: "/yandex.cloud.loadtesting.api.v1.AgentService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateAgentRequest) => + Buffer.from(CreateAgentRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateAgentRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + get: { + path: "/yandex.cloud.loadtesting.api.v1.AgentService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetAgentRequest) => + Buffer.from(GetAgentRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetAgentRequest.decode(value), + responseSerialize: (value: Agent) => + Buffer.from(Agent.encode(value).finish()), + responseDeserialize: (value: Buffer) => Agent.decode(value), + }, + list: { + path: "/yandex.cloud.loadtesting.api.v1.AgentService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAgentsRequest) => + Buffer.from(ListAgentsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListAgentsRequest.decode(value), + responseSerialize: (value: ListAgentsResponse) => + Buffer.from(ListAgentsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListAgentsResponse.decode(value), + }, + delete: { + 
path: "/yandex.cloud.loadtesting.api.v1.AgentService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteAgentRequest) => + Buffer.from(DeleteAgentRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteAgentRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface AgentServiceServer extends UntypedServiceImplementation { + create: handleUnaryCall; + get: handleUnaryCall; + list: handleUnaryCall; + delete: handleUnaryCall; +} + +export interface AgentServiceClient extends Client { + create( + request: CreateAgentRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAgentRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateAgentRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + get( + request: GetAgentRequest, + callback: (error: ServiceError | null, response: Agent) => void + ): ClientUnaryCall; + get( + request: GetAgentRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Agent) => void + ): ClientUnaryCall; + get( + request: GetAgentRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Agent) => void + ): ClientUnaryCall; + list( + request: ListAgentsRequest, + callback: (error: ServiceError | null, response: ListAgentsResponse) => void + ): ClientUnaryCall; + list( + request: ListAgentsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ListAgentsResponse) => void + ): ClientUnaryCall; + list( + request: ListAgentsRequest, + metadata: Metadata, + options: Partial, + callback: 
(error: ServiceError | null, response: ListAgentsResponse) => void + ): ClientUnaryCall; + delete( + request: DeleteAgentRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAgentRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteAgentRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const AgentServiceClient = makeGenericClientConstructor( + AgentServiceService, + "yandex.cloud.loadtesting.api.v1.AgentService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): AgentServiceClient; + service: typeof AgentServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/loadtesting/index.ts b/src/generated/yandex/cloud/loadtesting/index.ts new file mode 100644 index 00000000..6478188a --- /dev/null +++ b/src/generated/yandex/cloud/loadtesting/index.ts @@ -0,0 +1,12 @@ +export * as agent from './agent/v1/agent' +export * as agent_agent_registration_service from './agent/v1/agent_registration_service' +export * as agent_service from './agent/v1/agent_service' +export * as agent_job_service from './agent/v1/job_service' +export * as agent_monitoring_service from './agent/v1/monitoring_service' +export * as agent_test from './agent/v1/test' +export * as agent_test_service from './agent/v1/test_service' +export * as agent_trail_service from './agent/v1/trail_service' +export * as api_agent_service from './api/v1/agent_service' +export * as api_agent from './api/v1/agent/agent' +export * as api_create_compute_instance from './api/v1/agent/create_compute_instance' +export * as api_status from './api/v1/agent/status' \ No newline at end of file diff --git a/src/generated/yandex/cloud/logging/index.ts b/src/generated/yandex/cloud/logging/index.ts index e290200f..56cf6eae 100644 --- a/src/generated/yandex/cloud/logging/index.ts +++ b/src/generated/yandex/cloud/logging/index.ts @@ -1,6 +1,10 @@ +export * as export from './v1/export' +export * as export_service from './v1/export_service' export * as log_entry from './v1/log_entry' export * as log_group from './v1/log_group' export * as log_group_service from './v1/log_group_service' export * as log_ingestion_service from './v1/log_ingestion_service' export * as log_reading_service from 
'./v1/log_reading_service' -export * as log_resource from './v1/log_resource' \ No newline at end of file +export * as log_resource from './v1/log_resource' +export * as sink from './v1/sink' +export * as sink_service from './v1/sink_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/logging/v1/export.ts b/src/generated/yandex/cloud/logging/v1/export.ts new file mode 100644 index 00000000..baf06e34 --- /dev/null +++ b/src/generated/yandex/cloud/logging/v1/export.ts @@ -0,0 +1,187 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + LogLevel_Level, + logLevel_LevelFromJSON, + logLevel_LevelToJSON, +} from "../../../../yandex/cloud/logging/v1/log_entry"; + +export const protobufPackage = "yandex.cloud.logging.v1"; + +export interface ExportParams { + $type: "yandex.cloud.logging.v1.ExportParams"; + resourceTypes: string[]; + resourceIds: string[]; + streamNames: string[]; + levels: LogLevel_Level[]; + filter: string; +} + +const baseExportParams: object = { + $type: "yandex.cloud.logging.v1.ExportParams", + resourceTypes: "", + resourceIds: "", + streamNames: "", + levels: 0, + filter: "", +}; + +export const ExportParams = { + $type: "yandex.cloud.logging.v1.ExportParams" as const, + + encode( + message: ExportParams, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.resourceTypes) { + writer.uint32(10).string(v!); + } + for (const v of message.resourceIds) { + writer.uint32(18).string(v!); + } + for (const v of message.streamNames) { + writer.uint32(26).string(v!); + } + writer.uint32(34).fork(); + for (const v of message.levels) { + writer.int32(v); + } + writer.ldelim(); + if (message.filter !== "") { + writer.uint32(42).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExportParams { + const reader = input instanceof _m0.Reader 
? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExportParams } as ExportParams; + message.resourceTypes = []; + message.resourceIds = []; + message.streamNames = []; + message.levels = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceTypes.push(reader.string()); + break; + case 2: + message.resourceIds.push(reader.string()); + break; + case 3: + message.streamNames.push(reader.string()); + break; + case 4: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.levels.push(reader.int32() as any); + } + } else { + message.levels.push(reader.int32() as any); + } + break; + case 5: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExportParams { + const message = { ...baseExportParams } as ExportParams; + message.resourceTypes = (object.resourceTypes ?? []).map((e: any) => + String(e) + ); + message.resourceIds = (object.resourceIds ?? []).map((e: any) => String(e)); + message.streamNames = (object.streamNames ?? []).map((e: any) => String(e)); + message.levels = (object.levels ?? []).map((e: any) => + logLevel_LevelFromJSON(e) + ); + message.filter = + object.filter !== undefined && object.filter !== null + ? 
String(object.filter) + : ""; + return message; + }, + + toJSON(message: ExportParams): unknown { + const obj: any = {}; + if (message.resourceTypes) { + obj.resourceTypes = message.resourceTypes.map((e) => e); + } else { + obj.resourceTypes = []; + } + if (message.resourceIds) { + obj.resourceIds = message.resourceIds.map((e) => e); + } else { + obj.resourceIds = []; + } + if (message.streamNames) { + obj.streamNames = message.streamNames.map((e) => e); + } else { + obj.streamNames = []; + } + if (message.levels) { + obj.levels = message.levels.map((e) => logLevel_LevelToJSON(e)); + } else { + obj.levels = []; + } + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExportParams { + const message = { ...baseExportParams } as ExportParams; + message.resourceTypes = object.resourceTypes?.map((e) => e) || []; + message.resourceIds = object.resourceIds?.map((e) => e) || []; + message.streamNames = object.streamNames?.map((e) => e) || []; + message.levels = object.levels?.map((e) => e) || []; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ExportParams.$type, ExportParams); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/logging/v1/export_service.ts b/src/generated/yandex/cloud/logging/v1/export_service.ts new file mode 100644 index 00000000..52654780 --- /dev/null +++ b/src/generated/yandex/cloud/logging/v1/export_service.ts @@ -0,0 +1,521 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { ExportParams } from "../../../../yandex/cloud/logging/v1/export"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.logging.v1"; + +export interface RunExportRequest { + $type: "yandex.cloud.logging.v1.RunExportRequest"; + groupId: string; + sinkId: string; + params?: ExportParams; + resultFilename: string; + since?: Date; + until?: Date; +} + +export interface RunExportDetails { + $type: "yandex.cloud.logging.v1.RunExportDetails"; + groupId: string; + sinkId: string; + params?: ExportParams; + resultFilename: string; + since?: Date; + until?: Date; +} + +export interface RunExportMetadata { + $type: "yandex.cloud.logging.v1.RunExportMetadata"; + groupId: string; + sinkId: string; + resultFilename: string; +} + +const baseRunExportRequest: object = { + $type: "yandex.cloud.logging.v1.RunExportRequest", + groupId: "", + sinkId: "", + resultFilename: "", +}; + +export const RunExportRequest = { + $type: "yandex.cloud.logging.v1.RunExportRequest" as const, + + encode( + message: RunExportRequest, + writer: _m0.Writer = 
_m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.sinkId !== "") { + writer.uint32(18).string(message.sinkId); + } + if (message.params !== undefined) { + ExportParams.encode(message.params, writer.uint32(26).fork()).ldelim(); + } + if (message.resultFilename !== "") { + writer.uint32(34).string(message.resultFilename); + } + if (message.since !== undefined) { + Timestamp.encode( + toTimestamp(message.since), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.until !== undefined) { + Timestamp.encode( + toTimestamp(message.until), + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RunExportRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRunExportRequest } as RunExportRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.sinkId = reader.string(); + break; + case 3: + message.params = ExportParams.decode(reader, reader.uint32()); + break; + case 4: + message.resultFilename = reader.string(); + break; + case 5: + message.since = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.until = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RunExportRequest { + const message = { ...baseRunExportRequest } as RunExportRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? 
String(object.sinkId) + : ""; + message.params = + object.params !== undefined && object.params !== null + ? ExportParams.fromJSON(object.params) + : undefined; + message.resultFilename = + object.resultFilename !== undefined && object.resultFilename !== null + ? String(object.resultFilename) + : ""; + message.since = + object.since !== undefined && object.since !== null + ? fromJsonTimestamp(object.since) + : undefined; + message.until = + object.until !== undefined && object.until !== null + ? fromJsonTimestamp(object.until) + : undefined; + return message; + }, + + toJSON(message: RunExportRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + message.params !== undefined && + (obj.params = message.params + ? ExportParams.toJSON(message.params) + : undefined); + message.resultFilename !== undefined && + (obj.resultFilename = message.resultFilename); + message.since !== undefined && (obj.since = message.since.toISOString()); + message.until !== undefined && (obj.until = message.until.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): RunExportRequest { + const message = { ...baseRunExportRequest } as RunExportRequest; + message.groupId = object.groupId ?? ""; + message.sinkId = object.sinkId ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? ExportParams.fromPartial(object.params) + : undefined; + message.resultFilename = object.resultFilename ?? ""; + message.since = object.since ?? undefined; + message.until = object.until ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(RunExportRequest.$type, RunExportRequest); + +const baseRunExportDetails: object = { + $type: "yandex.cloud.logging.v1.RunExportDetails", + groupId: "", + sinkId: "", + resultFilename: "", +}; + +export const RunExportDetails = { + $type: "yandex.cloud.logging.v1.RunExportDetails" as const, + + encode( + message: RunExportDetails, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.sinkId !== "") { + writer.uint32(18).string(message.sinkId); + } + if (message.params !== undefined) { + ExportParams.encode(message.params, writer.uint32(26).fork()).ldelim(); + } + if (message.resultFilename !== "") { + writer.uint32(34).string(message.resultFilename); + } + if (message.since !== undefined) { + Timestamp.encode( + toTimestamp(message.since), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.until !== undefined) { + Timestamp.encode( + toTimestamp(message.until), + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RunExportDetails { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRunExportDetails } as RunExportDetails; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.sinkId = reader.string(); + break; + case 3: + message.params = ExportParams.decode(reader, reader.uint32()); + break; + case 4: + message.resultFilename = reader.string(); + break; + case 5: + message.since = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.until = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RunExportDetails { + const message = { ...baseRunExportDetails } as RunExportDetails; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + message.params = + object.params !== undefined && object.params !== null + ? ExportParams.fromJSON(object.params) + : undefined; + message.resultFilename = + object.resultFilename !== undefined && object.resultFilename !== null + ? String(object.resultFilename) + : ""; + message.since = + object.since !== undefined && object.since !== null + ? fromJsonTimestamp(object.since) + : undefined; + message.until = + object.until !== undefined && object.until !== null + ? fromJsonTimestamp(object.until) + : undefined; + return message; + }, + + toJSON(message: RunExportDetails): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + message.params !== undefined && + (obj.params = message.params + ? 
ExportParams.toJSON(message.params) + : undefined); + message.resultFilename !== undefined && + (obj.resultFilename = message.resultFilename); + message.since !== undefined && (obj.since = message.since.toISOString()); + message.until !== undefined && (obj.until = message.until.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): RunExportDetails { + const message = { ...baseRunExportDetails } as RunExportDetails; + message.groupId = object.groupId ?? ""; + message.sinkId = object.sinkId ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? ExportParams.fromPartial(object.params) + : undefined; + message.resultFilename = object.resultFilename ?? ""; + message.since = object.since ?? undefined; + message.until = object.until ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(RunExportDetails.$type, RunExportDetails); + +const baseRunExportMetadata: object = { + $type: "yandex.cloud.logging.v1.RunExportMetadata", + groupId: "", + sinkId: "", + resultFilename: "", +}; + +export const RunExportMetadata = { + $type: "yandex.cloud.logging.v1.RunExportMetadata" as const, + + encode( + message: RunExportMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.sinkId !== "") { + writer.uint32(18).string(message.sinkId); + } + if (message.resultFilename !== "") { + writer.uint32(26).string(message.resultFilename); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RunExportMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRunExportMetadata } as RunExportMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.sinkId = reader.string(); + break; + case 3: + message.resultFilename = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RunExportMetadata { + const message = { ...baseRunExportMetadata } as RunExportMetadata; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + message.resultFilename = + object.resultFilename !== undefined && object.resultFilename !== null + ? String(object.resultFilename) + : ""; + return message; + }, + + toJSON(message: RunExportMetadata): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + message.resultFilename !== undefined && + (obj.resultFilename = message.resultFilename); + return obj; + }, + + fromPartial, I>>( + object: I + ): RunExportMetadata { + const message = { ...baseRunExportMetadata } as RunExportMetadata; + message.groupId = object.groupId ?? ""; + message.sinkId = object.sinkId ?? ""; + message.resultFilename = object.resultFilename ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RunExportMetadata.$type, RunExportMetadata); + +export const ExportServiceService = { + run: { + path: "/yandex.cloud.logging.v1.ExportService/Run", + requestStream: false, + responseStream: false, + requestSerialize: (value: RunExportRequest) => + Buffer.from(RunExportRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RunExportRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ExportServiceServer extends UntypedServiceImplementation { + run: handleUnaryCall; +} + +export interface ExportServiceClient extends Client { + run( + request: RunExportRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + run( + request: RunExportRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + run( + request: RunExportRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ExportServiceClient = makeGenericClientConstructor( + ExportServiceService, + "yandex.cloud.logging.v1.ExportService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ExportServiceClient; + service: typeof ExportServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/logging/v1/log_ingestion_service.ts b/src/generated/yandex/cloud/logging/v1/log_ingestion_service.ts index 06edd01e..3b8b4141 100644 --- a/src/generated/yandex/cloud/logging/v1/log_ingestion_service.ts +++ b/src/generated/yandex/cloud/logging/v1/log_ingestion_service.ts @@ -370,7 +370,7 @@ messageTypeRegistry.set( WriteResponse_ErrorsEntry ); -/** A set of methods for writing to log groups. To make a request use `ingester.logging.yandexcloud.net`. */ +/** A set of methods for writing to log groups. */ export const LogIngestionServiceService = { /** Write log entries to specified destination. */ write: { diff --git a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts index aa330d88..480c85f0 100644 --- a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts +++ b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts @@ -510,7 +510,7 @@ export const Criteria = { messageTypeRegistry.set(Criteria.$type, Criteria); -/** A set of methods for reading from log groups. To make a request use `reader.logging.yandexcloud.net`. */ +/** A set of methods for reading from log groups. 
*/ export const LogReadingServiceService = { /** Read log entries from the specified log group. */ read: { diff --git a/src/generated/yandex/cloud/logging/v1/sink.ts b/src/generated/yandex/cloud/logging/v1/sink.ts new file mode 100644 index 00000000..83a2e93c --- /dev/null +++ b/src/generated/yandex/cloud/logging/v1/sink.ts @@ -0,0 +1,520 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.logging.v1"; + +export interface Sink { + $type: "yandex.cloud.logging.v1.Sink"; + /** Sink ID. */ + id: string; + /** Sink folder ID. */ + folderId: string; + /** Sink cloud ID. */ + cloudId: string; + /** Sink creation time. */ + createdAt?: Date; + /** Sink name. */ + name: string; + /** Sink description. */ + description: string; + /** Sink labels. */ + labels: { [key: string]: string }; + /** Logs will be written to the sink on behalf of this service account */ + serviceAccountId: string; + /** Yandex data stream */ + yds?: Sink_Yds | undefined; + /** Object storage */ + s3?: Sink_S3 | undefined; +} + +export interface Sink_LabelsEntry { + $type: "yandex.cloud.logging.v1.Sink.LabelsEntry"; + key: string; + value: string; +} + +export interface Sink_Yds { + $type: "yandex.cloud.logging.v1.Sink.Yds"; + /** Fully qualified name of data stream */ + streamName: string; +} + +export interface Sink_S3 { + $type: "yandex.cloud.logging.v1.Sink.S3"; + /** Object storage bucket */ + bucket: string; + /** Prefix to use for saved log object names */ + prefix: string; +} + +const baseSink: object = { + $type: "yandex.cloud.logging.v1.Sink", + id: "", + folderId: "", + cloudId: "", + name: "", + description: "", + serviceAccountId: "", +}; + +export const Sink = { + $type: "yandex.cloud.logging.v1.Sink" as const, + + encode(message: Sink, writer: _m0.Writer = 
_m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.cloudId !== "") { + writer.uint32(26).string(message.cloudId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(42).string(message.name); + } + if (message.description !== "") { + writer.uint32(50).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Sink_LabelsEntry.encode( + { + $type: "yandex.cloud.logging.v1.Sink.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(58).fork() + ).ldelim(); + }); + if (message.serviceAccountId !== "") { + writer.uint32(66).string(message.serviceAccountId); + } + if (message.yds !== undefined) { + Sink_Yds.encode(message.yds, writer.uint32(74).fork()).ldelim(); + } + if (message.s3 !== undefined) { + Sink_S3.encode(message.s3, writer.uint32(82).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Sink { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSink } as Sink; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.cloudId = reader.string(); + break; + case 4: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.name = reader.string(); + break; + case 6: + message.description = reader.string(); + break; + case 7: + const entry7 = Sink_LabelsEntry.decode(reader, reader.uint32()); + if (entry7.value !== undefined) { + message.labels[entry7.key] = entry7.value; + } + break; + case 8: + message.serviceAccountId = reader.string(); + break; + case 9: + message.yds = Sink_Yds.decode(reader, reader.uint32()); + break; + case 10: + message.s3 = Sink_S3.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Sink { + const message = { ...baseSink } as Sink; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.cloudId = + object.cloudId !== undefined && object.cloudId !== null + ? String(object.cloudId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? Sink_Yds.fromJSON(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? Sink_S3.fromJSON(object.s3) + : undefined; + return message; + }, + + toJSON(message: Sink): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.cloudId !== undefined && (obj.cloudId = message.cloudId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.yds !== undefined && + (obj.yds = message.yds ? Sink_Yds.toJSON(message.yds) : undefined); + message.s3 !== undefined && + (obj.s3 = message.s3 ? Sink_S3.toJSON(message.s3) : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Sink { + const message = { ...baseSink } as Sink; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.cloudId = object.cloudId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.serviceAccountId = object.serviceAccountId ?? ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? Sink_Yds.fromPartial(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? Sink_S3.fromPartial(object.s3) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Sink.$type, Sink); + +const baseSink_LabelsEntry: object = { + $type: "yandex.cloud.logging.v1.Sink.LabelsEntry", + key: "", + value: "", +}; + +export const Sink_LabelsEntry = { + $type: "yandex.cloud.logging.v1.Sink.LabelsEntry" as const, + + encode( + message: Sink_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Sink_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSink_LabelsEntry } as Sink_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Sink_LabelsEntry { + const message = { ...baseSink_LabelsEntry } as Sink_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: Sink_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Sink_LabelsEntry { + const message = { ...baseSink_LabelsEntry } as Sink_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Sink_LabelsEntry.$type, Sink_LabelsEntry); + +const baseSink_Yds: object = { + $type: "yandex.cloud.logging.v1.Sink.Yds", + streamName: "", +}; + +export const Sink_Yds = { + $type: "yandex.cloud.logging.v1.Sink.Yds" as const, + + encode( + message: Sink_Yds, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.streamName !== "") { + writer.uint32(10).string(message.streamName); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Sink_Yds { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSink_Yds } as Sink_Yds; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.streamName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Sink_Yds { + const message = { ...baseSink_Yds } as Sink_Yds; + message.streamName = + object.streamName !== undefined && object.streamName !== null + ? String(object.streamName) + : ""; + return message; + }, + + toJSON(message: Sink_Yds): unknown { + const obj: any = {}; + message.streamName !== undefined && (obj.streamName = message.streamName); + return obj; + }, + + fromPartial, I>>(object: I): Sink_Yds { + const message = { ...baseSink_Yds } as Sink_Yds; + message.streamName = object.streamName ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Sink_Yds.$type, Sink_Yds); + +const baseSink_S3: object = { + $type: "yandex.cloud.logging.v1.Sink.S3", + bucket: "", + prefix: "", +}; + +export const Sink_S3 = { + $type: "yandex.cloud.logging.v1.Sink.S3" as const, + + encode( + message: Sink_S3, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucket !== "") { + writer.uint32(10).string(message.bucket); + } + if (message.prefix !== "") { + writer.uint32(18).string(message.prefix); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Sink_S3 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSink_S3 } as Sink_S3; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucket = reader.string(); + break; + case 2: + message.prefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Sink_S3 { + const message = { ...baseSink_S3 } as Sink_S3; + message.bucket = + object.bucket !== undefined && object.bucket !== null + ? String(object.bucket) + : ""; + message.prefix = + object.prefix !== undefined && object.prefix !== null + ? String(object.prefix) + : ""; + return message; + }, + + toJSON(message: Sink_S3): unknown { + const obj: any = {}; + message.bucket !== undefined && (obj.bucket = message.bucket); + message.prefix !== undefined && (obj.prefix = message.prefix); + return obj; + }, + + fromPartial, I>>(object: I): Sink_S3 { + const message = { ...baseSink_S3 } as Sink_S3; + message.bucket = object.bucket ?? ""; + message.prefix = object.prefix ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Sink_S3.$type, Sink_S3); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/logging/v1/sink_service.ts b/src/generated/yandex/cloud/logging/v1/sink_service.ts new file mode 100644 index 00000000..e912b0df --- /dev/null +++ b/src/generated/yandex/cloud/logging/v1/sink_service.ts @@ -0,0 +1,1846 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { + Sink, + Sink_Yds, + Sink_S3, +} from 
"../../../../yandex/cloud/logging/v1/sink"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; + +export const protobufPackage = "yandex.cloud.logging.v1"; + +export interface GetSinkRequest { + $type: "yandex.cloud.logging.v1.GetSinkRequest"; + /** + * ID of the sink to return. + * + * To get a sink ID make a [SinkService.List] request. + */ + sinkId: string; +} + +export interface ListSinksRequest { + $type: "yandex.cloud.logging.v1.ListSinksRequest"; + /** + * Folder ID of the sinks to return. + * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than `page_size`, the service returns a [ListSinkssResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListSinksResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters sinks listed in the response. + * + * The expression must specify: + * 1. The field name. Currently filtering can only be applied to the [Sink.name] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `name="my-sink"`. + */ + filter: string; +} + +export interface ListSinksResponse { + $type: "yandex.cloud.logging.v1.ListSinksResponse"; + /** List of sinks in the specified folder. */ + sinks: Sink[]; + /** + * Token for getting the next page of the list. 
If the number of results is greater than + * the specified [ListSinksRequest.page_size], use `next_page_token` as the value + * for the [ListSinksRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateSinkRequest { + $type: "yandex.cloud.logging.v1.CreateSinkRequest"; + /** + * ID of the folder to create a sink in. + * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * Name of the sink. + * The name must be unique within the folder. + */ + name: string; + /** Description of the sink. */ + description: string; + /** Sink labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** Logs will be written to the sink on behalf of this service account */ + serviceAccountId: string; + /** Yandex data stream */ + yds?: Sink_Yds | undefined; + /** Object storage */ + s3?: Sink_S3 | undefined; +} + +export interface CreateSinkRequest_LabelsEntry { + $type: "yandex.cloud.logging.v1.CreateSinkRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateSinkMetadata { + $type: "yandex.cloud.logging.v1.CreateSinkMetadata"; + /** ID of the sink being created. */ + sinkId: string; +} + +export interface UpdateSinkRequest { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest"; + /** + * ID of the sink to update. + * + * To get a sink ID make a [SinkService.List] request. + */ + sinkId: string; + /** Field mask that specifies which attributes of the function should be updated. */ + updateMask?: FieldMask; + /** + * New name of the sink. + * The name must be unique within the folder. + */ + name: string; + /** New Description of the sink. */ + description: string; + /** New sink labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** new service account to use for logs writing to the sink. 
*/ + serviceAccountId: string; + /** Yandex data stream */ + yds?: Sink_Yds | undefined; + /** Object storage */ + s3?: Sink_S3 | undefined; +} + +export interface UpdateSinkRequest_LabelsEntry { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateSinkMetadata { + $type: "yandex.cloud.logging.v1.UpdateSinkMetadata"; + /** ID of the sink being updated. */ + sinkId: string; +} + +export interface DeleteSinkRequest { + $type: "yandex.cloud.logging.v1.DeleteSinkRequest"; + /** + * ID of the sink to delete. + * + * To get a sink ID make a [SinkService.List] request. + */ + sinkId: string; +} + +export interface DeleteSinkMetadata { + $type: "yandex.cloud.logging.v1.DeleteSinkMetadata"; + /** ID of the sink being deleted. */ + sinkId: string; +} + +export interface ListSinkOperationsRequest { + $type: "yandex.cloud.logging.v1.ListSinkOperationsRequest"; + /** + * ID of the sink to list operations for. + * + * To get a sink ID make a [SinkService.List] request. + */ + sinkId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than `page_size`, the service returns a [ListSinkOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListSinkOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * 1. The field name. Currently filtering can be applied to the [operation.Operation.description], [operation.Operation.created_at], [operation.Operation.modified_at], [operation.Operation.created_by], [operation.Operation.done] fields. + * 2. An `=` operator. + * 3. 
The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Examples of a filter: `done=false`, `created_by='John.Doe'`. + */ + filter: string; +} + +export interface ListSinkOperationsResponse { + $type: "yandex.cloud.logging.v1.ListSinkOperationsResponse"; + /** List of operations for the specified sink. */ + operations: Operation[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListOSinkperationsRequest.page_size], use `next_page_token` as the value + * for the [ListSinkOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetSinkRequest: object = { + $type: "yandex.cloud.logging.v1.GetSinkRequest", + sinkId: "", +}; + +export const GetSinkRequest = { + $type: "yandex.cloud.logging.v1.GetSinkRequest" as const, + + encode( + message: GetSinkRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetSinkRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetSinkRequest } as GetSinkRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetSinkRequest { + const message = { ...baseGetSinkRequest } as GetSinkRequest; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? 
String(object.sinkId) + : ""; + return message; + }, + + toJSON(message: GetSinkRequest): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetSinkRequest { + const message = { ...baseGetSinkRequest } as GetSinkRequest; + message.sinkId = object.sinkId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetSinkRequest.$type, GetSinkRequest); + +const baseListSinksRequest: object = { + $type: "yandex.cloud.logging.v1.ListSinksRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListSinksRequest = { + $type: "yandex.cloud.logging.v1.ListSinksRequest" as const, + + encode( + message: ListSinksRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(24).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(34).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(42).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListSinksRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListSinksRequest } as ListSinksRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 3: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 4: + message.pageToken = reader.string(); + break; + case 5: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSinksRequest { + const message = { ...baseListSinksRequest } as ListSinksRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListSinksRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSinksRequest { + const message = { ...baseListSinksRequest } as ListSinksRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListSinksRequest.$type, ListSinksRequest); + +const baseListSinksResponse: object = { + $type: "yandex.cloud.logging.v1.ListSinksResponse", + nextPageToken: "", +}; + +export const ListSinksResponse = { + $type: "yandex.cloud.logging.v1.ListSinksResponse" as const, + + encode( + message: ListSinksResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.sinks) { + Sink.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListSinksResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListSinksResponse } as ListSinksResponse; + message.sinks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinks.push(Sink.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSinksResponse { + const message = { ...baseListSinksResponse } as ListSinksResponse; + message.sinks = (object.sinks ?? []).map((e: any) => Sink.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSinksResponse): unknown { + const obj: any = {}; + if (message.sinks) { + obj.sinks = message.sinks.map((e) => (e ? 
Sink.toJSON(e) : undefined)); + } else { + obj.sinks = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSinksResponse { + const message = { ...baseListSinksResponse } as ListSinksResponse; + message.sinks = object.sinks?.map((e) => Sink.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListSinksResponse.$type, ListSinksResponse); + +const baseCreateSinkRequest: object = { + $type: "yandex.cloud.logging.v1.CreateSinkRequest", + folderId: "", + name: "", + description: "", + serviceAccountId: "", +}; + +export const CreateSinkRequest = { + $type: "yandex.cloud.logging.v1.CreateSinkRequest" as const, + + encode( + message: CreateSinkRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateSinkRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.logging.v1.CreateSinkRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.serviceAccountId !== "") { + writer.uint32(42).string(message.serviceAccountId); + } + if (message.yds !== undefined) { + Sink_Yds.encode(message.yds, writer.uint32(50).fork()).ldelim(); + } + if (message.s3 !== undefined) { + Sink_S3.encode(message.s3, writer.uint32(58).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateSinkRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCreateSinkRequest } as CreateSinkRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateSinkRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.serviceAccountId = reader.string(); + break; + case 6: + message.yds = Sink_Yds.decode(reader, reader.uint32()); + break; + case 7: + message.s3 = Sink_S3.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSinkRequest { + const message = { ...baseCreateSinkRequest } as CreateSinkRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? Sink_Yds.fromJSON(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? 
Sink_S3.fromJSON(object.s3) + : undefined; + return message; + }, + + toJSON(message: CreateSinkRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.yds !== undefined && + (obj.yds = message.yds ? Sink_Yds.toJSON(message.yds) : undefined); + message.s3 !== undefined && + (obj.s3 = message.s3 ? Sink_S3.toJSON(message.s3) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateSinkRequest { + const message = { ...baseCreateSinkRequest } as CreateSinkRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.serviceAccountId = object.serviceAccountId ?? ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? Sink_Yds.fromPartial(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? 
Sink_S3.fromPartial(object.s3) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateSinkRequest.$type, CreateSinkRequest); + +const baseCreateSinkRequest_LabelsEntry: object = { + $type: "yandex.cloud.logging.v1.CreateSinkRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateSinkRequest_LabelsEntry = { + $type: "yandex.cloud.logging.v1.CreateSinkRequest.LabelsEntry" as const, + + encode( + message: CreateSinkRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateSinkRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateSinkRequest_LabelsEntry, + } as CreateSinkRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSinkRequest_LabelsEntry { + const message = { + ...baseCreateSinkRequest_LabelsEntry, + } as CreateSinkRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateSinkRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateSinkRequest_LabelsEntry { + const message = { + ...baseCreateSinkRequest_LabelsEntry, + } as CreateSinkRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateSinkRequest_LabelsEntry.$type, + CreateSinkRequest_LabelsEntry +); + +const baseCreateSinkMetadata: object = { + $type: "yandex.cloud.logging.v1.CreateSinkMetadata", + sinkId: "", +}; + +export const CreateSinkMetadata = { + $type: "yandex.cloud.logging.v1.CreateSinkMetadata" as const, + + encode( + message: CreateSinkMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateSinkMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateSinkMetadata } as CreateSinkMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSinkMetadata { + const message = { ...baseCreateSinkMetadata } as CreateSinkMetadata; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? 
String(object.sinkId) + : ""; + return message; + }, + + toJSON(message: CreateSinkMetadata): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateSinkMetadata { + const message = { ...baseCreateSinkMetadata } as CreateSinkMetadata; + message.sinkId = object.sinkId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateSinkMetadata.$type, CreateSinkMetadata); + +const baseUpdateSinkRequest: object = { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest", + sinkId: "", + name: "", + description: "", + serviceAccountId: "", +}; + +export const UpdateSinkRequest = { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest" as const, + + encode( + message: UpdateSinkRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateSinkRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.serviceAccountId !== "") { + writer.uint32(50).string(message.serviceAccountId); + } + if (message.yds !== undefined) { + Sink_Yds.encode(message.yds, writer.uint32(58).fork()).ldelim(); + } + if (message.s3 !== undefined) { + Sink_S3.encode(message.s3, writer.uint32(66).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateSinkRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateSinkRequest } as UpdateSinkRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateSinkRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.serviceAccountId = reader.string(); + break; + case 7: + message.yds = Sink_Yds.decode(reader, reader.uint32()); + break; + case 8: + message.s3 = Sink_S3.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSinkRequest { + const message = { ...baseUpdateSinkRequest } as UpdateSinkRequest; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? 
Sink_Yds.fromJSON(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? Sink_S3.fromJSON(object.s3) + : undefined; + return message; + }, + + toJSON(message: UpdateSinkRequest): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.yds !== undefined && + (obj.yds = message.yds ? Sink_Yds.toJSON(message.yds) : undefined); + message.s3 !== undefined && + (obj.s3 = message.s3 ? Sink_S3.toJSON(message.s3) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateSinkRequest { + const message = { ...baseUpdateSinkRequest } as UpdateSinkRequest; + message.sinkId = object.sinkId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.serviceAccountId = object.serviceAccountId ?? ""; + message.yds = + object.yds !== undefined && object.yds !== null + ? Sink_Yds.fromPartial(object.yds) + : undefined; + message.s3 = + object.s3 !== undefined && object.s3 !== null + ? 
Sink_S3.fromPartial(object.s3) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateSinkRequest.$type, UpdateSinkRequest); + +const baseUpdateSinkRequest_LabelsEntry: object = { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateSinkRequest_LabelsEntry = { + $type: "yandex.cloud.logging.v1.UpdateSinkRequest.LabelsEntry" as const, + + encode( + message: UpdateSinkRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSinkRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateSinkRequest_LabelsEntry, + } as UpdateSinkRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSinkRequest_LabelsEntry { + const message = { + ...baseUpdateSinkRequest_LabelsEntry, + } as UpdateSinkRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateSinkRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateSinkRequest_LabelsEntry { + const message = { + ...baseUpdateSinkRequest_LabelsEntry, + } as UpdateSinkRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSinkRequest_LabelsEntry.$type, + UpdateSinkRequest_LabelsEntry +); + +const baseUpdateSinkMetadata: object = { + $type: "yandex.cloud.logging.v1.UpdateSinkMetadata", + sinkId: "", +}; + +export const UpdateSinkMetadata = { + $type: "yandex.cloud.logging.v1.UpdateSinkMetadata" as const, + + encode( + message: UpdateSinkMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateSinkMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateSinkMetadata } as UpdateSinkMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSinkMetadata { + const message = { ...baseUpdateSinkMetadata } as UpdateSinkMetadata; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? 
String(object.sinkId) + : ""; + return message; + }, + + toJSON(message: UpdateSinkMetadata): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateSinkMetadata { + const message = { ...baseUpdateSinkMetadata } as UpdateSinkMetadata; + message.sinkId = object.sinkId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateSinkMetadata.$type, UpdateSinkMetadata); + +const baseDeleteSinkRequest: object = { + $type: "yandex.cloud.logging.v1.DeleteSinkRequest", + sinkId: "", +}; + +export const DeleteSinkRequest = { + $type: "yandex.cloud.logging.v1.DeleteSinkRequest" as const, + + encode( + message: DeleteSinkRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteSinkRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteSinkRequest } as DeleteSinkRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteSinkRequest { + const message = { ...baseDeleteSinkRequest } as DeleteSinkRequest; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + return message; + }, + + toJSON(message: DeleteSinkRequest): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteSinkRequest { + const message = { ...baseDeleteSinkRequest } as DeleteSinkRequest; + message.sinkId = object.sinkId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteSinkRequest.$type, DeleteSinkRequest); + +const baseDeleteSinkMetadata: object = { + $type: "yandex.cloud.logging.v1.DeleteSinkMetadata", + sinkId: "", +}; + +export const DeleteSinkMetadata = { + $type: "yandex.cloud.logging.v1.DeleteSinkMetadata" as const, + + encode( + message: DeleteSinkMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteSinkMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteSinkMetadata } as DeleteSinkMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteSinkMetadata { + const message = { ...baseDeleteSinkMetadata } as DeleteSinkMetadata; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + return message; + }, + + toJSON(message: DeleteSinkMetadata): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteSinkMetadata { + const message = { ...baseDeleteSinkMetadata } as DeleteSinkMetadata; + message.sinkId = object.sinkId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteSinkMetadata.$type, DeleteSinkMetadata); + +const baseListSinkOperationsRequest: object = { + $type: "yandex.cloud.logging.v1.ListSinkOperationsRequest", + sinkId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListSinkOperationsRequest = { + $type: "yandex.cloud.logging.v1.ListSinkOperationsRequest" as const, + + encode( + message: ListSinkOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sinkId !== "") { + writer.uint32(10).string(message.sinkId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSinkOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSinkOperationsRequest, + } as ListSinkOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sinkId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSinkOperationsRequest { + const message = { + ...baseListSinkOperationsRequest, + } as ListSinkOperationsRequest; + message.sinkId = + object.sinkId !== undefined && object.sinkId !== null + ? String(object.sinkId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? 
Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListSinkOperationsRequest): unknown { + const obj: any = {}; + message.sinkId !== undefined && (obj.sinkId = message.sinkId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSinkOperationsRequest { + const message = { + ...baseListSinkOperationsRequest, + } as ListSinkOperationsRequest; + message.sinkId = object.sinkId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSinkOperationsRequest.$type, + ListSinkOperationsRequest +); + +const baseListSinkOperationsResponse: object = { + $type: "yandex.cloud.logging.v1.ListSinkOperationsResponse", + nextPageToken: "", +}; + +export const ListSinkOperationsResponse = { + $type: "yandex.cloud.logging.v1.ListSinkOperationsResponse" as const, + + encode( + message: ListSinkOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSinkOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListSinkOperationsResponse, + } as ListSinkOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSinkOperationsResponse { + const message = { + ...baseListSinkOperationsResponse, + } as ListSinkOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSinkOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSinkOperationsResponse { + const message = { + ...baseListSinkOperationsResponse, + } as ListSinkOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSinkOperationsResponse.$type, + ListSinkOperationsResponse +); + +/** A set of methods for managing log sinks. */ +export const SinkServiceService = { + /** + * Returns the specified sink. + * + * To get the list of all available sinks, make a [List] request. 
+ */ + get: { + path: "/yandex.cloud.logging.v1.SinkService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetSinkRequest) => + Buffer.from(GetSinkRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetSinkRequest.decode(value), + responseSerialize: (value: Sink) => + Buffer.from(Sink.encode(value).finish()), + responseDeserialize: (value: Buffer) => Sink.decode(value), + }, + /** Retrieves the list of sinks in the specified folder. */ + list: { + path: "/yandex.cloud.logging.v1.SinkService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListSinksRequest) => + Buffer.from(ListSinksRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListSinksRequest.decode(value), + responseSerialize: (value: ListSinksResponse) => + Buffer.from(ListSinksResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListSinksResponse.decode(value), + }, + /** Creates a sink in the specified folder. */ + create: { + path: "/yandex.cloud.logging.v1.SinkService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateSinkRequest) => + Buffer.from(CreateSinkRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateSinkRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified sink. 
*/ + update: { + path: "/yandex.cloud.logging.v1.SinkService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateSinkRequest) => + Buffer.from(UpdateSinkRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateSinkRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified sink. */ + delete: { + path: "/yandex.cloud.logging.v1.SinkService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteSinkRequest) => + Buffer.from(DeleteSinkRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteSinkRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified sink. */ + listOperations: { + path: "/yandex.cloud.logging.v1.SinkService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListSinkOperationsRequest) => + Buffer.from(ListSinkOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListSinkOperationsRequest.decode(value), + responseSerialize: (value: ListSinkOperationsResponse) => + Buffer.from(ListSinkOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListSinkOperationsResponse.decode(value), + }, + /** Lists existing access bindings for the specified sink. 
*/ + listAccessBindings: { + path: "/yandex.cloud.logging.v1.SinkService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + /** Sets access bindings for the specified sink. */ + setAccessBindings: { + path: "/yandex.cloud.logging.v1.SinkService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates access bindings for the specified sink. */ + updateAccessBindings: { + path: "/yandex.cloud.logging.v1.SinkService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface SinkServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified sink. + * + * To get the list of all available sinks, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of sinks in the specified folder. 
*/ + list: handleUnaryCall; + /** Creates a sink in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified sink. */ + update: handleUnaryCall; + /** Deletes the specified sink. */ + delete: handleUnaryCall; + /** Lists operations for the specified sink. */ + listOperations: handleUnaryCall< + ListSinkOperationsRequest, + ListSinkOperationsResponse + >; + /** Lists existing access bindings for the specified sink. */ + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + /** Sets access bindings for the specified sink. */ + setAccessBindings: handleUnaryCall; + /** Updates access bindings for the specified sink. */ + updateAccessBindings: handleUnaryCall; +} + +export interface SinkServiceClient extends Client { + /** + * Returns the specified sink. + * + * To get the list of all available sinks, make a [List] request. + */ + get( + request: GetSinkRequest, + callback: (error: ServiceError | null, response: Sink) => void + ): ClientUnaryCall; + get( + request: GetSinkRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Sink) => void + ): ClientUnaryCall; + get( + request: GetSinkRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Sink) => void + ): ClientUnaryCall; + /** Retrieves the list of sinks in the specified folder. */ + list( + request: ListSinksRequest, + callback: (error: ServiceError | null, response: ListSinksResponse) => void + ): ClientUnaryCall; + list( + request: ListSinksRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ListSinksResponse) => void + ): ClientUnaryCall; + list( + request: ListSinksRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ListSinksResponse) => void + ): ClientUnaryCall; + /** Creates a sink in the specified folder. 
*/ + create( + request: CreateSinkRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateSinkRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateSinkRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified sink. */ + update( + request: UpdateSinkRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateSinkRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateSinkRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified sink. */ + delete( + request: DeleteSinkRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteSinkRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteSinkRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified sink. 
*/ + listOperations( + request: ListSinkOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListSinkOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListSinkOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListSinkOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListSinkOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListSinkOperationsResponse + ) => void + ): ClientUnaryCall; + /** Lists existing access bindings for the specified sink. */ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the specified sink. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the specified sink. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const SinkServiceClient = makeGenericClientConstructor( + SinkServiceService, + "yandex.cloud.logging.v1.SinkService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): SinkServiceClient; + service: typeof SinkServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/index.ts b/src/generated/yandex/cloud/marketplace/index.ts index 97f627cc..756eca01 100644 --- a/src/generated/yandex/cloud/marketplace/index.ts +++ b/src/generated/yandex/cloud/marketplace/index.ts @@ -1,2 +1,9 @@ +export * as licensemanager_instance from './licensemanager/v1/instance' +export * as licensemanager_instance_service from './licensemanager/v1/instance_service' +export * as licensemanager_lock from './licensemanager/v1/lock' +export * as licensemanager_lock_service from './licensemanager/v1/lock_service' +export * as licensemanager_template from './licensemanager/v1/template' +export * as metering_image_product_usage_service from './metering/v1/image_product_usage_service' +export * as metering_usage_record from './metering/v1/usage_record' export * as image_product_usage_service from './v1/metering/image_product_usage_service' export * as usage_record from './v1/metering/usage_record' \ No newline at end of file diff --git a/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance.ts b/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance.ts new file mode 100644 index 00000000..d3838be8 --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance.ts @@ -0,0 +1,389 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Template } from "../../../../../yandex/cloud/marketplace/licensemanager/v1/template"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; +import { 
Lock } from "../../../../../yandex/cloud/marketplace/licensemanager/v1/lock"; + +export const protobufPackage = "yandex.cloud.marketplace.licensemanager.v1"; + +export interface Instance { + $type: "yandex.cloud.marketplace.licensemanager.v1.Instance"; + id: string; + cloudId: string; + folderId: string; + templateId: string; + templateVersionId: string; + description: string; + startTime?: Date; + endTime?: Date; + createdAt?: Date; + updatedAt?: Date; + state: Instance_State; + locks: Lock[]; + licenseTemplate?: Template; +} + +export enum Instance_State { + STATE_UNSPECIFIED = 0, + PENDING = 1, + ACTIVE = 2, + CANCELLED = 3, + EXPIRED = 4, + DEPRECATED = 5, + DELETED = 6, + UNRECOGNIZED = -1, +} + +export function instance_StateFromJSON(object: any): Instance_State { + switch (object) { + case 0: + case "STATE_UNSPECIFIED": + return Instance_State.STATE_UNSPECIFIED; + case 1: + case "PENDING": + return Instance_State.PENDING; + case 2: + case "ACTIVE": + return Instance_State.ACTIVE; + case 3: + case "CANCELLED": + return Instance_State.CANCELLED; + case 4: + case "EXPIRED": + return Instance_State.EXPIRED; + case 5: + case "DEPRECATED": + return Instance_State.DEPRECATED; + case 6: + case "DELETED": + return Instance_State.DELETED; + case -1: + case "UNRECOGNIZED": + default: + return Instance_State.UNRECOGNIZED; + } +} + +export function instance_StateToJSON(object: Instance_State): string { + switch (object) { + case Instance_State.STATE_UNSPECIFIED: + return "STATE_UNSPECIFIED"; + case Instance_State.PENDING: + return "PENDING"; + case Instance_State.ACTIVE: + return "ACTIVE"; + case Instance_State.CANCELLED: + return "CANCELLED"; + case Instance_State.EXPIRED: + return "EXPIRED"; + case Instance_State.DEPRECATED: + return "DEPRECATED"; + case Instance_State.DELETED: + return "DELETED"; + default: + return "UNKNOWN"; + } +} + +const baseInstance: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Instance", + id: "", + cloudId: "", + folderId: 
"", + templateId: "", + templateVersionId: "", + description: "", + state: 0, +}; + +export const Instance = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Instance" as const, + + encode( + message: Instance, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.cloudId !== "") { + writer.uint32(18).string(message.cloudId); + } + if (message.folderId !== "") { + writer.uint32(26).string(message.folderId); + } + if (message.templateId !== "") { + writer.uint32(34).string(message.templateId); + } + if (message.templateVersionId !== "") { + writer.uint32(42).string(message.templateVersionId); + } + if (message.description !== "") { + writer.uint32(114).string(message.description); + } + if (message.startTime !== undefined) { + Timestamp.encode( + toTimestamp(message.startTime), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.endTime !== undefined) { + Timestamp.encode( + toTimestamp(message.endTime), + writer.uint32(66).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.state !== 0) { + writer.uint32(88).int32(message.state); + } + for (const v of message.locks) { + Lock.encode(v!, writer.uint32(98).fork()).ldelim(); + } + if (message.licenseTemplate !== undefined) { + Template.encode( + message.licenseTemplate, + writer.uint32(106).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Instance { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseInstance } as Instance; + message.locks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.cloudId = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.templateId = reader.string(); + break; + case 5: + message.templateVersionId = reader.string(); + break; + case 14: + message.description = reader.string(); + break; + case 7: + message.startTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.endTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 9: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + message.state = reader.int32() as any; + break; + case 12: + message.locks.push(Lock.decode(reader, reader.uint32())); + break; + case 13: + message.licenseTemplate = Template.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Instance { + const message = { ...baseInstance } as Instance; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.cloudId = + object.cloudId !== undefined && object.cloudId !== null + ? String(object.cloudId) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.templateId = + object.templateId !== undefined && object.templateId !== null + ? String(object.templateId) + : ""; + message.templateVersionId = + object.templateVersionId !== undefined && + object.templateVersionId !== null + ? 
String(object.templateVersionId) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.startTime = + object.startTime !== undefined && object.startTime !== null + ? fromJsonTimestamp(object.startTime) + : undefined; + message.endTime = + object.endTime !== undefined && object.endTime !== null + ? fromJsonTimestamp(object.endTime) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.state = + object.state !== undefined && object.state !== null + ? instance_StateFromJSON(object.state) + : 0; + message.locks = (object.locks ?? []).map((e: any) => Lock.fromJSON(e)); + message.licenseTemplate = + object.licenseTemplate !== undefined && object.licenseTemplate !== null + ? 
Template.fromJSON(object.licenseTemplate) + : undefined; + return message; + }, + + toJSON(message: Instance): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.cloudId !== undefined && (obj.cloudId = message.cloudId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.templateId !== undefined && (obj.templateId = message.templateId); + message.templateVersionId !== undefined && + (obj.templateVersionId = message.templateVersionId); + message.description !== undefined && + (obj.description = message.description); + message.startTime !== undefined && + (obj.startTime = message.startTime.toISOString()); + message.endTime !== undefined && + (obj.endTime = message.endTime.toISOString()); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.state !== undefined && + (obj.state = instance_StateToJSON(message.state)); + if (message.locks) { + obj.locks = message.locks.map((e) => (e ? Lock.toJSON(e) : undefined)); + } else { + obj.locks = []; + } + message.licenseTemplate !== undefined && + (obj.licenseTemplate = message.licenseTemplate + ? Template.toJSON(message.licenseTemplate) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Instance { + const message = { ...baseInstance } as Instance; + message.id = object.id ?? ""; + message.cloudId = object.cloudId ?? ""; + message.folderId = object.folderId ?? ""; + message.templateId = object.templateId ?? ""; + message.templateVersionId = object.templateVersionId ?? ""; + message.description = object.description ?? ""; + message.startTime = object.startTime ?? undefined; + message.endTime = object.endTime ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.state = object.state ?? 
0; + message.locks = object.locks?.map((e) => Lock.fromPartial(e)) || []; + message.licenseTemplate = + object.licenseTemplate !== undefined && object.licenseTemplate !== null + ? Template.fromPartial(object.licenseTemplate) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Instance.$type, Instance); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance_service.ts b/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance_service.ts new file mode 100644 index 00000000..53d000a8 --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/licensemanager/v1/instance_service.ts @@ -0,0 +1,441 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + 
UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Instance } from "../../../../../yandex/cloud/marketplace/licensemanager/v1/instance"; + +export const protobufPackage = "yandex.cloud.marketplace.licensemanager.v1"; + +export interface GetInstanceRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetInstanceRequest"; + instanceId: string; +} + +export interface ListInstancesRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.ListInstancesRequest"; + folderId: string; + pageSize: number; + pageToken: string; + filter: string; + orderBy: string; +} + +export interface ListInstancesResponse { + $type: "yandex.cloud.marketplace.licensemanager.v1.ListInstancesResponse"; + instances: Instance[]; + nextPageToken: string; +} + +const baseGetInstanceRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetInstanceRequest", + instanceId: "", +}; + +export const GetInstanceRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.GetInstanceRequest" as const, + + encode( + message: GetInstanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetInstanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetInstanceRequest } as GetInstanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetInstanceRequest { + const message = { ...baseGetInstanceRequest } as GetInstanceRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + return message; + }, + + toJSON(message: GetInstanceRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetInstanceRequest { + const message = { ...baseGetInstanceRequest } as GetInstanceRequest; + message.instanceId = object.instanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetInstanceRequest.$type, GetInstanceRequest); + +const baseListInstancesRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.ListInstancesRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", + orderBy: "", +}; + +export const ListInstancesRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.ListInstancesRequest" as const, + + encode( + message: ListInstancesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): 
ListInstancesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListInstancesRequest } as ListInstancesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + case 5: + message.orderBy = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListInstancesRequest { + const message = { ...baseListInstancesRequest } as ListInstancesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? 
String(object.orderBy) + : ""; + return message; + }, + + toJSON(message: ListInstancesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListInstancesRequest { + const message = { ...baseListInstancesRequest } as ListInstancesRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListInstancesRequest.$type, ListInstancesRequest); + +const baseListInstancesResponse: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.ListInstancesResponse", + nextPageToken: "", +}; + +export const ListInstancesResponse = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.ListInstancesResponse" as const, + + encode( + message: ListInstancesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.instances) { + Instance.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListInstancesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListInstancesResponse } as ListInstancesResponse; + message.instances = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instances.push(Instance.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListInstancesResponse { + const message = { ...baseListInstancesResponse } as ListInstancesResponse; + message.instances = (object.instances ?? []).map((e: any) => + Instance.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListInstancesResponse): unknown { + const obj: any = {}; + if (message.instances) { + obj.instances = message.instances.map((e) => + e ? Instance.toJSON(e) : undefined + ); + } else { + obj.instances = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListInstancesResponse { + const message = { ...baseListInstancesResponse } as ListInstancesResponse; + message.instances = + object.instances?.map((e) => Instance.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListInstancesResponse.$type, ListInstancesResponse); + +export const InstanceServiceService = { + get: { + path: "/yandex.cloud.marketplace.licensemanager.v1.InstanceService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetInstanceRequest) => + Buffer.from(GetInstanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetInstanceRequest.decode(value), + responseSerialize: (value: Instance) => + Buffer.from(Instance.encode(value).finish()), + responseDeserialize: (value: Buffer) => Instance.decode(value), + }, + list: { + path: "/yandex.cloud.marketplace.licensemanager.v1.InstanceService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListInstancesRequest) => + Buffer.from(ListInstancesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListInstancesRequest.decode(value), + responseSerialize: (value: ListInstancesResponse) => + Buffer.from(ListInstancesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListInstancesResponse.decode(value), + }, +} as const; + +export interface InstanceServiceServer extends UntypedServiceImplementation { + get: handleUnaryCall; + list: handleUnaryCall; +} + +export interface InstanceServiceClient extends Client { + get( + request: GetInstanceRequest, + callback: (error: ServiceError | null, response: Instance) => void + ): ClientUnaryCall; + get( + request: GetInstanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Instance) => void + ): ClientUnaryCall; + get( + request: GetInstanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Instance) => void + ): ClientUnaryCall; + list( + request: ListInstancesRequest, + callback: ( + error: ServiceError | null, + response: ListInstancesResponse + ) => void + ): ClientUnaryCall; + list( + request: 
ListInstancesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListInstancesResponse + ) => void + ): ClientUnaryCall; + list( + request: ListInstancesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListInstancesResponse + ) => void + ): ClientUnaryCall; +} + +export const InstanceServiceClient = makeGenericClientConstructor( + InstanceServiceService, + "yandex.cloud.marketplace.licensemanager.v1.InstanceService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): InstanceServiceClient; + service: typeof InstanceServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock.ts b/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock.ts new file mode 100644 index 00000000..d80a9bab --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock.ts @@ -0,0 +1,284 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.marketplace.licensemanager.v1"; + +export interface Lock { + $type: "yandex.cloud.marketplace.licensemanager.v1.Lock"; + id: string; + instanceId: string; + resourceId: string; + startTime?: Date; + endTime?: Date; + createdAt?: Date; + updatedAt?: Date; + state: Lock_State; +} + +export enum Lock_State { + STATE_UNSPECIFIED = 0, + UNLOCKED = 1, + LOCKED = 2, + DELETED = 3, + UNRECOGNIZED = -1, +} + +export function lock_StateFromJSON(object: any): Lock_State { + switch (object) { + case 0: + case "STATE_UNSPECIFIED": + return Lock_State.STATE_UNSPECIFIED; + case 1: + case "UNLOCKED": + return Lock_State.UNLOCKED; + case 2: + case "LOCKED": + return Lock_State.LOCKED; + case 3: + case "DELETED": + return Lock_State.DELETED; + case -1: + case "UNRECOGNIZED": + default: + return Lock_State.UNRECOGNIZED; + } +} + +export function lock_StateToJSON(object: Lock_State): string { + switch (object) { + case Lock_State.STATE_UNSPECIFIED: + return "STATE_UNSPECIFIED"; + case Lock_State.UNLOCKED: + return "UNLOCKED"; + case Lock_State.LOCKED: + return 
"LOCKED"; + case Lock_State.DELETED: + return "DELETED"; + default: + return "UNKNOWN"; + } +} + +const baseLock: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Lock", + id: "", + instanceId: "", + resourceId: "", + state: 0, +}; + +export const Lock = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Lock" as const, + + encode(message: Lock, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.instanceId !== "") { + writer.uint32(18).string(message.instanceId); + } + if (message.resourceId !== "") { + writer.uint32(26).string(message.resourceId); + } + if (message.startTime !== undefined) { + Timestamp.encode( + toTimestamp(message.startTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.endTime !== undefined) { + Timestamp.encode( + toTimestamp(message.endTime), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(50).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.state !== 0) { + writer.uint32(64).int32(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Lock { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLock } as Lock; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.instanceId = reader.string(); + break; + case 3: + message.resourceId = reader.string(); + break; + case 4: + message.startTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.endTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 7: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.state = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Lock { + const message = { ...baseLock } as Lock; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.startTime = + object.startTime !== undefined && object.startTime !== null + ? fromJsonTimestamp(object.startTime) + : undefined; + message.endTime = + object.endTime !== undefined && object.endTime !== null + ? fromJsonTimestamp(object.endTime) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.state = + object.state !== undefined && object.state !== null + ? 
lock_StateFromJSON(object.state) + : 0; + return message; + }, + + toJSON(message: Lock): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.startTime !== undefined && + (obj.startTime = message.startTime.toISOString()); + message.endTime !== undefined && + (obj.endTime = message.endTime.toISOString()); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.state !== undefined && + (obj.state = lock_StateToJSON(message.state)); + return obj; + }, + + fromPartial, I>>(object: I): Lock { + const message = { ...baseLock } as Lock; + message.id = object.id ?? ""; + message.instanceId = object.instanceId ?? ""; + message.resourceId = object.resourceId ?? ""; + message.startTime = object.startTime ?? undefined; + message.endTime = object.endTime ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.state = object.state ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Lock.$type, Lock); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock_service.ts b/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock_service.ts new file mode 100644 index 00000000..ff87e6df --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/licensemanager/v1/lock_service.ts @@ -0,0 +1,813 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Lock } from "../../../../../yandex/cloud/marketplace/licensemanager/v1/lock"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.marketplace.licensemanager.v1"; + +export interface GetLockRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetLockRequest"; + lockId: string; +} + +export interface CreateLockRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.CreateLockRequest"; + /** license */ + instanceId: string; + resourceId: string; +} + +export 
interface EnsureLockRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.EnsureLockRequest"; + /** license */ + instanceId: string; + resourceId: string; +} + +export interface CreateLockMetadata { + $type: "yandex.cloud.marketplace.licensemanager.v1.CreateLockMetadata"; + lockId: string; +} + +export interface EnsureLockMetadata { + $type: "yandex.cloud.marketplace.licensemanager.v1.EnsureLockMetadata"; + lockId: string; +} + +export interface DeleteLockRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.DeleteLockRequest"; + lockId: string; +} + +export interface DeleteLockMetadata { + $type: "yandex.cloud.marketplace.licensemanager.v1.DeleteLockMetadata"; + lockId: string; +} + +export interface GetLockByInstanceAndResourceRequest { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetLockByInstanceAndResourceRequest"; + /** license */ + instanceId: string; + resourceId: string; +} + +const baseGetLockRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetLockRequest", + lockId: "", +}; + +export const GetLockRequest = { + $type: "yandex.cloud.marketplace.licensemanager.v1.GetLockRequest" as const, + + encode( + message: GetLockRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.lockId !== "") { + writer.uint32(10).string(message.lockId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetLockRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetLockRequest } as GetLockRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lockId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetLockRequest { + const message = { ...baseGetLockRequest } as GetLockRequest; + message.lockId = + object.lockId !== undefined && object.lockId !== null + ? String(object.lockId) + : ""; + return message; + }, + + toJSON(message: GetLockRequest): unknown { + const obj: any = {}; + message.lockId !== undefined && (obj.lockId = message.lockId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetLockRequest { + const message = { ...baseGetLockRequest } as GetLockRequest; + message.lockId = object.lockId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetLockRequest.$type, GetLockRequest); + +const baseCreateLockRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.CreateLockRequest", + instanceId: "", + resourceId: "", +}; + +export const CreateLockRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.CreateLockRequest" as const, + + encode( + message: CreateLockRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.resourceId !== "") { + writer.uint32(18).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateLockRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCreateLockRequest } as CreateLockRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateLockRequest { + const message = { ...baseCreateLockRequest } as CreateLockRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: CreateLockRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateLockRequest { + const message = { ...baseCreateLockRequest } as CreateLockRequest; + message.instanceId = object.instanceId ?? ""; + message.resourceId = object.resourceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreateLockRequest.$type, CreateLockRequest); + +const baseEnsureLockRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.EnsureLockRequest", + instanceId: "", + resourceId: "", +}; + +export const EnsureLockRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.EnsureLockRequest" as const, + + encode( + message: EnsureLockRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.resourceId !== "") { + writer.uint32(18).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EnsureLockRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEnsureLockRequest } as EnsureLockRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnsureLockRequest { + const message = { ...baseEnsureLockRequest } as EnsureLockRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: EnsureLockRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): EnsureLockRequest { + const message = { ...baseEnsureLockRequest } as EnsureLockRequest; + message.instanceId = object.instanceId ?? ""; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EnsureLockRequest.$type, EnsureLockRequest); + +const baseCreateLockMetadata: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.CreateLockMetadata", + lockId: "", +}; + +export const CreateLockMetadata = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.CreateLockMetadata" as const, + + encode( + message: CreateLockMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.lockId !== "") { + writer.uint32(10).string(message.lockId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateLockMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateLockMetadata } as CreateLockMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lockId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateLockMetadata { + const message = { ...baseCreateLockMetadata } as CreateLockMetadata; + message.lockId = + object.lockId !== undefined && object.lockId !== null + ? 
String(object.lockId) + : ""; + return message; + }, + + toJSON(message: CreateLockMetadata): unknown { + const obj: any = {}; + message.lockId !== undefined && (obj.lockId = message.lockId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateLockMetadata { + const message = { ...baseCreateLockMetadata } as CreateLockMetadata; + message.lockId = object.lockId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateLockMetadata.$type, CreateLockMetadata); + +const baseEnsureLockMetadata: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.EnsureLockMetadata", + lockId: "", +}; + +export const EnsureLockMetadata = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.EnsureLockMetadata" as const, + + encode( + message: EnsureLockMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.lockId !== "") { + writer.uint32(10).string(message.lockId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EnsureLockMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEnsureLockMetadata } as EnsureLockMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lockId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnsureLockMetadata { + const message = { ...baseEnsureLockMetadata } as EnsureLockMetadata; + message.lockId = + object.lockId !== undefined && object.lockId !== null + ? 
String(object.lockId) + : ""; + return message; + }, + + toJSON(message: EnsureLockMetadata): unknown { + const obj: any = {}; + message.lockId !== undefined && (obj.lockId = message.lockId); + return obj; + }, + + fromPartial, I>>( + object: I + ): EnsureLockMetadata { + const message = { ...baseEnsureLockMetadata } as EnsureLockMetadata; + message.lockId = object.lockId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EnsureLockMetadata.$type, EnsureLockMetadata); + +const baseDeleteLockRequest: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.DeleteLockRequest", + lockId: "", +}; + +export const DeleteLockRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.DeleteLockRequest" as const, + + encode( + message: DeleteLockRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.lockId !== "") { + writer.uint32(10).string(message.lockId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteLockRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteLockRequest } as DeleteLockRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lockId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteLockRequest { + const message = { ...baseDeleteLockRequest } as DeleteLockRequest; + message.lockId = + object.lockId !== undefined && object.lockId !== null + ? 
String(object.lockId) + : ""; + return message; + }, + + toJSON(message: DeleteLockRequest): unknown { + const obj: any = {}; + message.lockId !== undefined && (obj.lockId = message.lockId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteLockRequest { + const message = { ...baseDeleteLockRequest } as DeleteLockRequest; + message.lockId = object.lockId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteLockRequest.$type, DeleteLockRequest); + +const baseDeleteLockMetadata: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.DeleteLockMetadata", + lockId: "", +}; + +export const DeleteLockMetadata = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.DeleteLockMetadata" as const, + + encode( + message: DeleteLockMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.lockId !== "") { + writer.uint32(10).string(message.lockId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteLockMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteLockMetadata } as DeleteLockMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lockId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteLockMetadata { + const message = { ...baseDeleteLockMetadata } as DeleteLockMetadata; + message.lockId = + object.lockId !== undefined && object.lockId !== null + ? 
String(object.lockId) + : ""; + return message; + }, + + toJSON(message: DeleteLockMetadata): unknown { + const obj: any = {}; + message.lockId !== undefined && (obj.lockId = message.lockId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteLockMetadata { + const message = { ...baseDeleteLockMetadata } as DeleteLockMetadata; + message.lockId = object.lockId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteLockMetadata.$type, DeleteLockMetadata); + +const baseGetLockByInstanceAndResourceRequest: object = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.GetLockByInstanceAndResourceRequest", + instanceId: "", + resourceId: "", +}; + +export const GetLockByInstanceAndResourceRequest = { + $type: + "yandex.cloud.marketplace.licensemanager.v1.GetLockByInstanceAndResourceRequest" as const, + + encode( + message: GetLockByInstanceAndResourceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.resourceId !== "") { + writer.uint32(18).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetLockByInstanceAndResourceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetLockByInstanceAndResourceRequest, + } as GetLockByInstanceAndResourceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetLockByInstanceAndResourceRequest { + const message = { + ...baseGetLockByInstanceAndResourceRequest, + } as GetLockByInstanceAndResourceRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: GetLockByInstanceAndResourceRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): GetLockByInstanceAndResourceRequest { + const message = { + ...baseGetLockByInstanceAndResourceRequest, + } as GetLockByInstanceAndResourceRequest; + message.instanceId = object.instanceId ?? ""; + message.resourceId = object.resourceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + GetLockByInstanceAndResourceRequest.$type, + GetLockByInstanceAndResourceRequest +); + +export const LockServiceService = { + get: { + path: "/yandex.cloud.marketplace.licensemanager.v1.LockService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetLockRequest) => + Buffer.from(GetLockRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetLockRequest.decode(value), + responseSerialize: (value: Lock) => + Buffer.from(Lock.encode(value).finish()), + responseDeserialize: (value: Buffer) => Lock.decode(value), + }, + getByInstanceAndResource: { + path: "/yandex.cloud.marketplace.licensemanager.v1.LockService/GetByInstanceAndResource", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetLockByInstanceAndResourceRequest) => + Buffer.from(GetLockByInstanceAndResourceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetLockByInstanceAndResourceRequest.decode(value), + responseSerialize: (value: Lock) => + Buffer.from(Lock.encode(value).finish()), + responseDeserialize: (value: Buffer) => Lock.decode(value), + }, + create: { + path: "/yandex.cloud.marketplace.licensemanager.v1.LockService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateLockRequest) => + Buffer.from(CreateLockRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateLockRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + ensure: { + path: "/yandex.cloud.marketplace.licensemanager.v1.LockService/Ensure", + requestStream: false, + responseStream: false, + requestSerialize: (value: EnsureLockRequest) => + Buffer.from(EnsureLockRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => EnsureLockRequest.decode(value), + 
responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + delete: { + path: "/yandex.cloud.marketplace.licensemanager.v1.LockService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteLockRequest) => + Buffer.from(DeleteLockRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteLockRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface LockServiceServer extends UntypedServiceImplementation { + get: handleUnaryCall; + getByInstanceAndResource: handleUnaryCall< + GetLockByInstanceAndResourceRequest, + Lock + >; + create: handleUnaryCall; + ensure: handleUnaryCall; + delete: handleUnaryCall; +} + +export interface LockServiceClient extends Client { + get( + request: GetLockRequest, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + get( + request: GetLockRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + get( + request: GetLockRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + getByInstanceAndResource( + request: GetLockByInstanceAndResourceRequest, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + getByInstanceAndResource( + request: GetLockByInstanceAndResourceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + getByInstanceAndResource( + request: GetLockByInstanceAndResourceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Lock) => void + ): ClientUnaryCall; + create( + request: 
CreateLockRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateLockRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateLockRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + ensure( + request: EnsureLockRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + ensure( + request: EnsureLockRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + ensure( + request: EnsureLockRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteLockRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteLockRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteLockRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const LockServiceClient = makeGenericClientConstructor( + LockServiceService, + "yandex.cloud.marketplace.licensemanager.v1.LockService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): LockServiceClient; + service: typeof LockServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/licensemanager/v1/template.ts b/src/generated/yandex/cloud/marketplace/licensemanager/v1/template.ts new file mode 100644 index 00000000..7d66bb01 --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/licensemanager/v1/template.ts @@ -0,0 +1,327 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.marketplace.licensemanager.v1"; + +export interface Template { + $type: "yandex.cloud.marketplace.licensemanager.v1.Template"; + id: string; + versionId: string; + name: string; + publisherId: string; + productId: string; + tariffId: string; + licenseSkuId: string; + period: string; + createdAt?: Date; + updatedAt?: Date; + state: Template_State; +} + +export enum Template_State { + STATE_UNSPECIFIED = 0, + PENDING = 1, + ACTIVE = 2, + DEPRECATED = 3, + DELETED = 4, + UNRECOGNIZED = -1, +} + +export function template_StateFromJSON(object: any): Template_State { + switch (object) { + case 0: + case "STATE_UNSPECIFIED": + return Template_State.STATE_UNSPECIFIED; + case 1: + case "PENDING": + return Template_State.PENDING; + case 2: + case "ACTIVE": + return Template_State.ACTIVE; + case 3: + case "DEPRECATED": + return Template_State.DEPRECATED; + case 4: + case "DELETED": + return Template_State.DELETED; + case -1: + case "UNRECOGNIZED": + default: + return Template_State.UNRECOGNIZED; + } +} + +export function template_StateToJSON(object: Template_State): string { + switch (object) { + case 
Template_State.STATE_UNSPECIFIED: + return "STATE_UNSPECIFIED"; + case Template_State.PENDING: + return "PENDING"; + case Template_State.ACTIVE: + return "ACTIVE"; + case Template_State.DEPRECATED: + return "DEPRECATED"; + case Template_State.DELETED: + return "DELETED"; + default: + return "UNKNOWN"; + } +} + +const baseTemplate: object = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Template", + id: "", + versionId: "", + name: "", + publisherId: "", + productId: "", + tariffId: "", + licenseSkuId: "", + period: "", + state: 0, +}; + +export const Template = { + $type: "yandex.cloud.marketplace.licensemanager.v1.Template" as const, + + encode( + message: Template, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.publisherId !== "") { + writer.uint32(34).string(message.publisherId); + } + if (message.productId !== "") { + writer.uint32(42).string(message.productId); + } + if (message.tariffId !== "") { + writer.uint32(50).string(message.tariffId); + } + if (message.licenseSkuId !== "") { + writer.uint32(58).string(message.licenseSkuId); + } + if (message.period !== "") { + writer.uint32(66).string(message.period); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.state !== 0) { + writer.uint32(88).int32(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Template { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseTemplate } as Template; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.publisherId = reader.string(); + break; + case 5: + message.productId = reader.string(); + break; + case 6: + message.tariffId = reader.string(); + break; + case 7: + message.licenseSkuId = reader.string(); + break; + case 8: + message.period = reader.string(); + break; + case 9: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + message.state = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Template { + const message = { ...baseTemplate } as Template; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.publisherId = + object.publisherId !== undefined && object.publisherId !== null + ? String(object.publisherId) + : ""; + message.productId = + object.productId !== undefined && object.productId !== null + ? String(object.productId) + : ""; + message.tariffId = + object.tariffId !== undefined && object.tariffId !== null + ? String(object.tariffId) + : ""; + message.licenseSkuId = + object.licenseSkuId !== undefined && object.licenseSkuId !== null + ? String(object.licenseSkuId) + : ""; + message.period = + object.period !== undefined && object.period !== null + ? 
String(object.period) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + message.state = + object.state !== undefined && object.state !== null + ? template_StateFromJSON(object.state) + : 0; + return message; + }, + + toJSON(message: Template): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.name !== undefined && (obj.name = message.name); + message.publisherId !== undefined && + (obj.publisherId = message.publisherId); + message.productId !== undefined && (obj.productId = message.productId); + message.tariffId !== undefined && (obj.tariffId = message.tariffId); + message.licenseSkuId !== undefined && + (obj.licenseSkuId = message.licenseSkuId); + message.period !== undefined && (obj.period = message.period); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + message.state !== undefined && + (obj.state = template_StateToJSON(message.state)); + return obj; + }, + + fromPartial, I>>(object: I): Template { + const message = { ...baseTemplate } as Template; + message.id = object.id ?? ""; + message.versionId = object.versionId ?? ""; + message.name = object.name ?? ""; + message.publisherId = object.publisherId ?? ""; + message.productId = object.productId ?? ""; + message.tariffId = object.tariffId ?? ""; + message.licenseSkuId = object.licenseSkuId ?? ""; + message.period = object.period ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + message.state = object.state ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Template.$type, Template); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/metering/v1/image_product_usage_service.ts b/src/generated/yandex/cloud/marketplace/metering/v1/image_product_usage_service.ts new file mode 100644 index 00000000..41af71f4 --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/metering/v1/image_product_usage_service.ts @@ -0,0 +1,354 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + UsageRecord, + AcceptedUsageRecord, + 
RejectedUsageRecord, +} from "../../../../../yandex/cloud/marketplace/metering/v1/usage_record"; + +export const protobufPackage = "yandex.cloud.marketplace.metering.v1"; + +export interface WriteImageProductUsageRequest { + $type: "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageRequest"; + /** Checks whether you have the access required for the emit usage. */ + validateOnly: boolean; + /** Marketplace Product's ID. */ + productId: string; + /** List of product usage records (up to 25 per request). */ + usageRecords: UsageRecord[]; +} + +export interface WriteImageProductUsageResponse { + $type: "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageResponse"; + /** List of accepted product usage records. */ + accepted: AcceptedUsageRecord[]; + /** List of rejected product usage records (with reason). */ + rejected: RejectedUsageRecord[]; +} + +const baseWriteImageProductUsageRequest: object = { + $type: "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageRequest", + validateOnly: false, + productId: "", +}; + +export const WriteImageProductUsageRequest = { + $type: + "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageRequest" as const, + + encode( + message: WriteImageProductUsageRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.validateOnly === true) { + writer.uint32(8).bool(message.validateOnly); + } + if (message.productId !== "") { + writer.uint32(18).string(message.productId); + } + for (const v of message.usageRecords) { + UsageRecord.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WriteImageProductUsageRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseWriteImageProductUsageRequest, + } as WriteImageProductUsageRequest; + message.usageRecords = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.validateOnly = reader.bool(); + break; + case 2: + message.productId = reader.string(); + break; + case 3: + message.usageRecords.push( + UsageRecord.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WriteImageProductUsageRequest { + const message = { + ...baseWriteImageProductUsageRequest, + } as WriteImageProductUsageRequest; + message.validateOnly = + object.validateOnly !== undefined && object.validateOnly !== null + ? Boolean(object.validateOnly) + : false; + message.productId = + object.productId !== undefined && object.productId !== null + ? String(object.productId) + : ""; + message.usageRecords = (object.usageRecords ?? []).map((e: any) => + UsageRecord.fromJSON(e) + ); + return message; + }, + + toJSON(message: WriteImageProductUsageRequest): unknown { + const obj: any = {}; + message.validateOnly !== undefined && + (obj.validateOnly = message.validateOnly); + message.productId !== undefined && (obj.productId = message.productId); + if (message.usageRecords) { + obj.usageRecords = message.usageRecords.map((e) => + e ? UsageRecord.toJSON(e) : undefined + ); + } else { + obj.usageRecords = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): WriteImageProductUsageRequest { + const message = { + ...baseWriteImageProductUsageRequest, + } as WriteImageProductUsageRequest; + message.validateOnly = object.validateOnly ?? false; + message.productId = object.productId ?? 
""; + message.usageRecords = + object.usageRecords?.map((e) => UsageRecord.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + WriteImageProductUsageRequest.$type, + WriteImageProductUsageRequest +); + +const baseWriteImageProductUsageResponse: object = { + $type: "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageResponse", +}; + +export const WriteImageProductUsageResponse = { + $type: + "yandex.cloud.marketplace.metering.v1.WriteImageProductUsageResponse" as const, + + encode( + message: WriteImageProductUsageResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.accepted) { + AcceptedUsageRecord.encode(v!, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.rejected) { + RejectedUsageRecord.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WriteImageProductUsageResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseWriteImageProductUsageResponse, + } as WriteImageProductUsageResponse; + message.accepted = []; + message.rejected = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.accepted.push( + AcceptedUsageRecord.decode(reader, reader.uint32()) + ); + break; + case 2: + message.rejected.push( + RejectedUsageRecord.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WriteImageProductUsageResponse { + const message = { + ...baseWriteImageProductUsageResponse, + } as WriteImageProductUsageResponse; + message.accepted = (object.accepted ?? []).map((e: any) => + AcceptedUsageRecord.fromJSON(e) + ); + message.rejected = (object.rejected ?? 
[]).map((e: any) => + RejectedUsageRecord.fromJSON(e) + ); + return message; + }, + + toJSON(message: WriteImageProductUsageResponse): unknown { + const obj: any = {}; + if (message.accepted) { + obj.accepted = message.accepted.map((e) => + e ? AcceptedUsageRecord.toJSON(e) : undefined + ); + } else { + obj.accepted = []; + } + if (message.rejected) { + obj.rejected = message.rejected.map((e) => + e ? RejectedUsageRecord.toJSON(e) : undefined + ); + } else { + obj.rejected = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): WriteImageProductUsageResponse { + const message = { + ...baseWriteImageProductUsageResponse, + } as WriteImageProductUsageResponse; + message.accepted = + object.accepted?.map((e) => AcceptedUsageRecord.fromPartial(e)) || []; + message.rejected = + object.rejected?.map((e) => RejectedUsageRecord.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + WriteImageProductUsageResponse.$type, + WriteImageProductUsageResponse +); + +/** A set of methods for managing image product's usage. */ +export const ImageProductUsageServiceService = { + /** Writes image product's usage. Authentication is by user's service account. */ + write: { + path: "/yandex.cloud.marketplace.metering.v1.ImageProductUsageService/Write", + requestStream: false, + responseStream: false, + requestSerialize: (value: WriteImageProductUsageRequest) => + Buffer.from(WriteImageProductUsageRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + WriteImageProductUsageRequest.decode(value), + responseSerialize: (value: WriteImageProductUsageResponse) => + Buffer.from(WriteImageProductUsageResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + WriteImageProductUsageResponse.decode(value), + }, +} as const; + +export interface ImageProductUsageServiceServer + extends UntypedServiceImplementation { + /** Writes image product's usage. Authentication is by user's service account. 
*/ + write: handleUnaryCall< + WriteImageProductUsageRequest, + WriteImageProductUsageResponse + >; +} + +export interface ImageProductUsageServiceClient extends Client { + /** Writes image product's usage. Authentication is by user's service account. */ + write( + request: WriteImageProductUsageRequest, + callback: ( + error: ServiceError | null, + response: WriteImageProductUsageResponse + ) => void + ): ClientUnaryCall; + write( + request: WriteImageProductUsageRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: WriteImageProductUsageResponse + ) => void + ): ClientUnaryCall; + write( + request: WriteImageProductUsageRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: WriteImageProductUsageResponse + ) => void + ): ClientUnaryCall; +} + +export const ImageProductUsageServiceClient = makeGenericClientConstructor( + ImageProductUsageServiceService, + "yandex.cloud.marketplace.metering.v1.ImageProductUsageService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ImageProductUsageServiceClient; + service: typeof ImageProductUsageServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/marketplace/metering/v1/usage_record.ts b/src/generated/yandex/cloud/marketplace/metering/v1/usage_record.ts new file mode 100644 index 00000000..28389a61 --- /dev/null +++ b/src/generated/yandex/cloud/marketplace/metering/v1/usage_record.ts @@ -0,0 +1,422 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.marketplace.metering.v1"; + +export interface UsageRecord { + $type: "yandex.cloud.marketplace.metering.v1.UsageRecord"; + /** Unique identifier of the usage record (UUID format). */ + uuid: string; + /** Consumed Marketplace SKU ID, linked to `UsageRecord.product_id`. */ + skuId: string; + /** Quantity of SKU consumed, measured in `sku.usage_unit` units (e.g. bytes). */ + quantity: number; + /** Timestamp in UTC for which the usage is being reported. */ + timestamp?: Date; +} + +export interface AcceptedUsageRecord { + $type: "yandex.cloud.marketplace.metering.v1.AcceptedUsageRecord"; + /** Unique identifier of the usage record (UUID format). */ + uuid: string; +} + +export interface RejectedUsageRecord { + $type: "yandex.cloud.marketplace.metering.v1.RejectedUsageRecord"; + /** Unique identifier of the usage record (UUID format). */ + uuid: string; + /** The reason of rejection. 
*/ + reason: RejectedUsageRecord_Reason; +} + +export enum RejectedUsageRecord_Reason { + REASON_UNSPECIFIED = 0, + DUPLICATE = 1, + EXPIRED = 2, + INVALID_TIMESTAMP = 3, + INVALID_SKU_ID = 4, + INVALID_PRODUCT_ID = 5, + INVALID_QUANTITY = 6, + INVALID_ID = 7, + UNRECOGNIZED = -1, +} + +export function rejectedUsageRecord_ReasonFromJSON( + object: any +): RejectedUsageRecord_Reason { + switch (object) { + case 0: + case "REASON_UNSPECIFIED": + return RejectedUsageRecord_Reason.REASON_UNSPECIFIED; + case 1: + case "DUPLICATE": + return RejectedUsageRecord_Reason.DUPLICATE; + case 2: + case "EXPIRED": + return RejectedUsageRecord_Reason.EXPIRED; + case 3: + case "INVALID_TIMESTAMP": + return RejectedUsageRecord_Reason.INVALID_TIMESTAMP; + case 4: + case "INVALID_SKU_ID": + return RejectedUsageRecord_Reason.INVALID_SKU_ID; + case 5: + case "INVALID_PRODUCT_ID": + return RejectedUsageRecord_Reason.INVALID_PRODUCT_ID; + case 6: + case "INVALID_QUANTITY": + return RejectedUsageRecord_Reason.INVALID_QUANTITY; + case 7: + case "INVALID_ID": + return RejectedUsageRecord_Reason.INVALID_ID; + case -1: + case "UNRECOGNIZED": + default: + return RejectedUsageRecord_Reason.UNRECOGNIZED; + } +} + +export function rejectedUsageRecord_ReasonToJSON( + object: RejectedUsageRecord_Reason +): string { + switch (object) { + case RejectedUsageRecord_Reason.REASON_UNSPECIFIED: + return "REASON_UNSPECIFIED"; + case RejectedUsageRecord_Reason.DUPLICATE: + return "DUPLICATE"; + case RejectedUsageRecord_Reason.EXPIRED: + return "EXPIRED"; + case RejectedUsageRecord_Reason.INVALID_TIMESTAMP: + return "INVALID_TIMESTAMP"; + case RejectedUsageRecord_Reason.INVALID_SKU_ID: + return "INVALID_SKU_ID"; + case RejectedUsageRecord_Reason.INVALID_PRODUCT_ID: + return "INVALID_PRODUCT_ID"; + case RejectedUsageRecord_Reason.INVALID_QUANTITY: + return "INVALID_QUANTITY"; + case RejectedUsageRecord_Reason.INVALID_ID: + return "INVALID_ID"; + default: + return "UNKNOWN"; + } +} + +const baseUsageRecord: 
object = { + $type: "yandex.cloud.marketplace.metering.v1.UsageRecord", + uuid: "", + skuId: "", + quantity: 0, +}; + +export const UsageRecord = { + $type: "yandex.cloud.marketplace.metering.v1.UsageRecord" as const, + + encode( + message: UsageRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uuid !== "") { + writer.uint32(10).string(message.uuid); + } + if (message.skuId !== "") { + writer.uint32(18).string(message.skuId); + } + if (message.quantity !== 0) { + writer.uint32(24).int64(message.quantity); + } + if (message.timestamp !== undefined) { + Timestamp.encode( + toTimestamp(message.timestamp), + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UsageRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUsageRecord } as UsageRecord; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uuid = reader.string(); + break; + case 2: + message.skuId = reader.string(); + break; + case 3: + message.quantity = longToNumber(reader.int64() as Long); + break; + case 4: + message.timestamp = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UsageRecord { + const message = { ...baseUsageRecord } as UsageRecord; + message.uuid = + object.uuid !== undefined && object.uuid !== null + ? String(object.uuid) + : ""; + message.skuId = + object.skuId !== undefined && object.skuId !== null + ? String(object.skuId) + : ""; + message.quantity = + object.quantity !== undefined && object.quantity !== null + ? Number(object.quantity) + : 0; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? 
fromJsonTimestamp(object.timestamp) + : undefined; + return message; + }, + + toJSON(message: UsageRecord): unknown { + const obj: any = {}; + message.uuid !== undefined && (obj.uuid = message.uuid); + message.skuId !== undefined && (obj.skuId = message.skuId); + message.quantity !== undefined && + (obj.quantity = Math.round(message.quantity)); + message.timestamp !== undefined && + (obj.timestamp = message.timestamp.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): UsageRecord { + const message = { ...baseUsageRecord } as UsageRecord; + message.uuid = object.uuid ?? ""; + message.skuId = object.skuId ?? ""; + message.quantity = object.quantity ?? 0; + message.timestamp = object.timestamp ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(UsageRecord.$type, UsageRecord); + +const baseAcceptedUsageRecord: object = { + $type: "yandex.cloud.marketplace.metering.v1.AcceptedUsageRecord", + uuid: "", +}; + +export const AcceptedUsageRecord = { + $type: "yandex.cloud.marketplace.metering.v1.AcceptedUsageRecord" as const, + + encode( + message: AcceptedUsageRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uuid !== "") { + writer.uint32(10).string(message.uuid); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AcceptedUsageRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAcceptedUsageRecord } as AcceptedUsageRecord; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uuid = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AcceptedUsageRecord { + const message = { ...baseAcceptedUsageRecord } as AcceptedUsageRecord; + message.uuid = + object.uuid !== undefined && object.uuid !== null + ? 
String(object.uuid) + : ""; + return message; + }, + + toJSON(message: AcceptedUsageRecord): unknown { + const obj: any = {}; + message.uuid !== undefined && (obj.uuid = message.uuid); + return obj; + }, + + fromPartial, I>>( + object: I + ): AcceptedUsageRecord { + const message = { ...baseAcceptedUsageRecord } as AcceptedUsageRecord; + message.uuid = object.uuid ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AcceptedUsageRecord.$type, AcceptedUsageRecord); + +const baseRejectedUsageRecord: object = { + $type: "yandex.cloud.marketplace.metering.v1.RejectedUsageRecord", + uuid: "", + reason: 0, +}; + +export const RejectedUsageRecord = { + $type: "yandex.cloud.marketplace.metering.v1.RejectedUsageRecord" as const, + + encode( + message: RejectedUsageRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uuid !== "") { + writer.uint32(10).string(message.uuid); + } + if (message.reason !== 0) { + writer.uint32(16).int32(message.reason); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RejectedUsageRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRejectedUsageRecord } as RejectedUsageRecord; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uuid = reader.string(); + break; + case 2: + message.reason = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RejectedUsageRecord { + const message = { ...baseRejectedUsageRecord } as RejectedUsageRecord; + message.uuid = + object.uuid !== undefined && object.uuid !== null + ? String(object.uuid) + : ""; + message.reason = + object.reason !== undefined && object.reason !== null + ? 
rejectedUsageRecord_ReasonFromJSON(object.reason) + : 0; + return message; + }, + + toJSON(message: RejectedUsageRecord): unknown { + const obj: any = {}; + message.uuid !== undefined && (obj.uuid = message.uuid); + message.reason !== undefined && + (obj.reason = rejectedUsageRecord_ReasonToJSON(message.reason)); + return obj; + }, + + fromPartial, I>>( + object: I + ): RejectedUsageRecord { + const message = { ...baseRejectedUsageRecord } as RejectedUsageRecord; + message.uuid = object.uuid ?? ""; + message.reason = object.reason ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(RejectedUsageRecord.$type, RejectedUsageRecord); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/backup.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/backup.ts index 81876027..9a29b33a 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/backup.ts @@ -27,6 +27,50 @@ export interface Backup { sourceShardNames: string[]; /** Time when the backup operation was started. */ startedAt?: Date; + /** Size of backup in bytes. */ + size: number; + /** How this backup was created (manual/automatic/etc...). */ + type: Backup_BackupType; +} + +export enum Backup_BackupType { + BACKUP_TYPE_UNSPECIFIED = 0, + /** AUTOMATED - Backup created by automated daily schedule. */ + AUTOMATED = 1, + /** MANUAL - Backup created by user request. 
*/ + MANUAL = 2, + UNRECOGNIZED = -1, +} + +export function backup_BackupTypeFromJSON(object: any): Backup_BackupType { + switch (object) { + case 0: + case "BACKUP_TYPE_UNSPECIFIED": + return Backup_BackupType.BACKUP_TYPE_UNSPECIFIED; + case 1: + case "AUTOMATED": + return Backup_BackupType.AUTOMATED; + case 2: + case "MANUAL": + return Backup_BackupType.MANUAL; + case -1: + case "UNRECOGNIZED": + default: + return Backup_BackupType.UNRECOGNIZED; + } +} + +export function backup_BackupTypeToJSON(object: Backup_BackupType): string { + switch (object) { + case Backup_BackupType.BACKUP_TYPE_UNSPECIFIED: + return "BACKUP_TYPE_UNSPECIFIED"; + case Backup_BackupType.AUTOMATED: + return "AUTOMATED"; + case Backup_BackupType.MANUAL: + return "MANUAL"; + default: + return "UNKNOWN"; + } } const baseBackup: object = { @@ -35,6 +79,8 @@ const baseBackup: object = { folderId: "", sourceClusterId: "", sourceShardNames: "", + size: 0, + type: 0, }; export const Backup = { @@ -68,6 +114,12 @@ export const Backup = { writer.uint32(42).fork() ).ldelim(); } + if (message.size !== 0) { + writer.uint32(56).int64(message.size); + } + if (message.type !== 0) { + writer.uint32(64).int32(message.type); + } return writer; }, @@ -101,6 +153,12 @@ export const Backup = { Timestamp.decode(reader, reader.uint32()) ); break; + case 7: + message.size = longToNumber(reader.int64() as Long); + break; + case 8: + message.type = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -132,6 +190,14 @@ export const Backup = { object.startedAt !== undefined && object.startedAt !== null ? fromJsonTimestamp(object.startedAt) : undefined; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.type = + object.type !== undefined && object.type !== null + ? 
backup_BackupTypeFromJSON(object.type) + : 0; return message; }, @@ -150,6 +216,9 @@ export const Backup = { } message.startedAt !== undefined && (obj.startedAt = message.startedAt.toISOString()); + message.size !== undefined && (obj.size = Math.round(message.size)); + message.type !== undefined && + (obj.type = backup_BackupTypeToJSON(message.type)); return obj; }, @@ -161,12 +230,25 @@ export const Backup = { message.sourceClusterId = object.sourceClusterId ?? ""; message.sourceShardNames = object.sourceShardNames?.map((e) => e) || []; message.startedAt = object.startedAt ?? undefined; + message.size = object.size ?? 0; + message.type = object.type ?? 0; return message; }, }; messageTypeRegistry.set(Backup.$type, Backup); +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -216,6 +298,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts index 594fdcfb..3ff077c8 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts @@ -583,6 +583,7 @@ export interface CloudStorage { moveFactor?: number; dataCacheEnabled?: boolean; dataCacheMaxSize?: number; + preferNotToMerge?: boolean; } const baseCluster: object = { @@ 
-2387,6 +2388,15 @@ export const CloudStorage = { writer.uint32(34).fork() ).ldelim(); } + if (message.preferNotToMerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.preferNotToMerge!, + }, + writer.uint32(42).fork() + ).ldelim(); + } return writer; }, @@ -2418,6 +2428,12 @@ export const CloudStorage = { reader.uint32() ).value; break; + case 5: + message.preferNotToMerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2444,6 +2460,10 @@ export const CloudStorage = { object.dataCacheMaxSize !== undefined && object.dataCacheMaxSize !== null ? Number(object.dataCacheMaxSize) : undefined; + message.preferNotToMerge = + object.preferNotToMerge !== undefined && object.preferNotToMerge !== null + ? Boolean(object.preferNotToMerge) + : undefined; return message; }, @@ -2455,6 +2475,8 @@ export const CloudStorage = { (obj.dataCacheEnabled = message.dataCacheEnabled); message.dataCacheMaxSize !== undefined && (obj.dataCacheMaxSize = message.dataCacheMaxSize); + message.preferNotToMerge !== undefined && + (obj.preferNotToMerge = message.preferNotToMerge); return obj; }, @@ -2466,6 +2488,7 @@ export const CloudStorage = { message.moveFactor = object.moveFactor ?? undefined; message.dataCacheEnabled = object.dataCacheEnabled ?? undefined; message.dataCacheMaxSize = object.dataCacheMaxSize ?? undefined; + message.preferNotToMerge = object.preferNotToMerge ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts index 98d9f23e..6598a5ce 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts @@ -31,8 +31,8 @@ import { host_TypeFromJSON, host_TypeToJSON, } from "../../../../../yandex/cloud/mdb/clickhouse/v1/cluster"; -import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/clickhouse/v1/maintenance"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { ClickhouseConfig_ExternalDictionary, ClickhouseConfig, @@ -130,6 +130,8 @@ export interface CreateClusterRequest { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Window of maintenance operations. */ + maintenanceWindow?: MaintenanceWindow; } export interface CreateClusterRequest_LabelsEntry { @@ -1003,6 +1005,36 @@ export interface DeleteClusterShardGroupMetadata { shardGroupName: string; } +export interface ListClusterExternalDictionariesRequest { + $type: "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesRequest"; + /** ID of the cluster that the external dictionaries belong to. */ + clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterExternalDictionaryResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterExternalDictionaryResponse.next_page_token] + * returned by a previous list request. 
+ */ + pageToken: string; +} + +export interface ListClusterExternalDictionariesResponse { + $type: "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesResponse"; + /** List of ClickHouse Cluster external dictionaries. */ + externalDictionaries: ClickhouseConfig_ExternalDictionary[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListClusterExternalDictionaryRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterExternalDictionaryRequest.page_token] parameter in the next list request. Each subsequent + * list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + export interface CreateClusterExternalDictionaryRequest { $type: "yandex.cloud.mdb.clickhouse.v1.CreateClusterExternalDictionaryRequest"; /** @@ -1470,6 +1502,12 @@ export const CreateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(112).bool(message.deletionProtection); } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(122).fork() + ).ldelim(); + } return writer; }, @@ -1538,6 +1576,12 @@ export const CreateClusterRequest = { case 14: message.deletionProtection = reader.bool(); break; + case 15: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1603,6 +1647,11 @@ export const CreateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? 
MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; return message; }, @@ -1656,6 +1705,10 @@ export const CreateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); return obj; }, @@ -1690,6 +1743,11 @@ export const CreateClusterRequest = { message.serviceAccountId = object.serviceAccountId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; return message; }, }; @@ -7395,6 +7453,212 @@ messageTypeRegistry.set( DeleteClusterShardGroupMetadata ); +const baseListClusterExternalDictionariesRequest: object = { + $type: + "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterExternalDictionariesRequest = { + $type: + "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesRequest" as const, + + encode( + message: ListClusterExternalDictionariesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterExternalDictionariesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterExternalDictionariesRequest, + } as ListClusterExternalDictionariesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterExternalDictionariesRequest { + const message = { + ...baseListClusterExternalDictionariesRequest, + } as ListClusterExternalDictionariesRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterExternalDictionariesRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListClusterExternalDictionariesRequest { + const message = { + ...baseListClusterExternalDictionariesRequest, + } as ListClusterExternalDictionariesRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterExternalDictionariesRequest.$type, + ListClusterExternalDictionariesRequest +); + +const baseListClusterExternalDictionariesResponse: object = { + $type: + "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesResponse", + nextPageToken: "", +}; + +export const ListClusterExternalDictionariesResponse = { + $type: + "yandex.cloud.mdb.clickhouse.v1.ListClusterExternalDictionariesResponse" as const, + + encode( + message: ListClusterExternalDictionariesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.externalDictionaries) { + ClickhouseConfig_ExternalDictionary.encode( + v!, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterExternalDictionariesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterExternalDictionariesResponse, + } as ListClusterExternalDictionariesResponse; + message.externalDictionaries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.externalDictionaries.push( + ClickhouseConfig_ExternalDictionary.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterExternalDictionariesResponse { + const message = { + ...baseListClusterExternalDictionariesResponse, + } as ListClusterExternalDictionariesResponse; + message.externalDictionaries = (object.externalDictionaries ?? 
[]).map( + (e: any) => ClickhouseConfig_ExternalDictionary.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterExternalDictionariesResponse): unknown { + const obj: any = {}; + if (message.externalDictionaries) { + obj.externalDictionaries = message.externalDictionaries.map((e) => + e ? ClickhouseConfig_ExternalDictionary.toJSON(e) : undefined + ); + } else { + obj.externalDictionaries = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListClusterExternalDictionariesResponse { + const message = { + ...baseListClusterExternalDictionariesResponse, + } as ListClusterExternalDictionariesResponse; + message.externalDictionaries = + object.externalDictionaries?.map((e) => + ClickhouseConfig_ExternalDictionary.fromPartial(e) + ) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterExternalDictionariesResponse.$type, + ListClusterExternalDictionariesResponse +); + const baseCreateClusterExternalDictionaryRequest: object = { $type: "yandex.cloud.mdb.clickhouse.v1.CreateClusterExternalDictionaryRequest", @@ -9057,6 +9321,24 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Retrieves a list of external dictionaries that belong to specified cluster. 
*/ + listExternalDictionaries: { + path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListExternalDictionaries", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterExternalDictionariesRequest) => + Buffer.from( + ListClusterExternalDictionariesRequest.encode(value).finish() + ), + requestDeserialize: (value: Buffer) => + ListClusterExternalDictionariesRequest.decode(value), + responseSerialize: (value: ListClusterExternalDictionariesResponse) => + Buffer.from( + ListClusterExternalDictionariesResponse.encode(value).finish() + ), + responseDeserialize: (value: Buffer) => + ListClusterExternalDictionariesResponse.decode(value), + }, /** Creates an external dictionary for the specified ClickHouse cluster. */ createExternalDictionary: { path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/CreateExternalDictionary", @@ -9190,6 +9472,11 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { updateShardGroup: handleUnaryCall; /** Deletes the specified shard group. */ deleteShardGroup: handleUnaryCall; + /** Retrieves a list of external dictionaries that belong to specified cluster. */ + listExternalDictionaries: handleUnaryCall< + ListClusterExternalDictionariesRequest, + ListClusterExternalDictionariesResponse + >; /** Creates an external dictionary for the specified ClickHouse cluster. */ createExternalDictionary: handleUnaryCall< CreateClusterExternalDictionaryRequest, @@ -9752,6 +10039,31 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Retrieves a list of external dictionaries that belong to specified cluster. 
*/ + listExternalDictionaries( + request: ListClusterExternalDictionariesRequest, + callback: ( + error: ServiceError | null, + response: ListClusterExternalDictionariesResponse + ) => void + ): ClientUnaryCall; + listExternalDictionaries( + request: ListClusterExternalDictionariesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterExternalDictionariesResponse + ) => void + ): ClientUnaryCall; + listExternalDictionaries( + request: ListClusterExternalDictionariesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterExternalDictionariesResponse + ) => void + ): ClientUnaryCall; /** Creates an external dictionary for the specified ClickHouse cluster. */ createExternalDictionary( request: CreateClusterExternalDictionaryRequest, diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts index 88cf2599..d0cd8569 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts @@ -5,6 +5,8 @@ import _m0 from "protobufjs/minimal"; import { Int64Value, BoolValue, + StringValue, + DoubleValue, } from "../../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.clickhouse.v1.config"; @@ -136,8 +138,93 @@ export interface ClickhouseConfig { textLogRetentionTime?: number; /** Logging level for text_log system table. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR. */ textLogLevel: ClickhouseConfig_LogLevel; + opentelemetrySpanLogEnabled?: boolean; backgroundPoolSize?: number; backgroundSchedulePoolSize?: number; + /** + * Sets the number of threads performing background fetches for tables with **ReplicatedMergeTree** engines. Default value: 8. 
+ * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#background_fetches_pool_size). + */ + backgroundFetchesPoolSize?: number; + backgroundMovePoolSize?: number; + backgroundDistributedSchedulePoolSize?: number; + backgroundBufferFlushSchedulePoolSize?: number; + backgroundMessageBrokerSchedulePoolSize?: number; + /** + * The default database. + * + * To get a list of cluster databases, see [Yandex Managed ClickHouse documentation](https://cloud.yandex.com/en/docs/managed-clickhouse/operations/databases#list-db). + */ + defaultDatabase?: string; + /** + * Sets the memory size (in bytes) for a stack trace at every peak allocation step. Default value: **4194304**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#total-memory-profiler-step). + */ + totalMemoryProfilerStep?: number; + totalMemoryTrackerSampleProbability?: number; + /** + * The maximum number of threads that will be used for performing a variety of operations (mostly garbage collection) for *MergeTree-engine tables in a background. + * Default: 8 + * Min version: 21.11 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_common_pool_size) + */ + backgroundCommonPoolSize?: number; + /** + * Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals to 2 and background_pool_size is set to 16 then ClickHouse can execute 32 background merges concurrently. This is possible, because background operations could be suspended and postponed. This is needed to give small merges more execution priority. You can only increase this ratio at runtime. To lower it you have to restart the server. 
The same as for background_pool_size setting background_merges_mutations_concurrency_ratio could be applied from the default profile for backward compatibility. + * Default: 2 + * Min_version: 21.11 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio) + */ + backgroundMergesMutationsConcurrencyRatio?: number; + /** + * Default: false + * Min version: 21.9 + */ + queryViewsLogEnabled?: boolean; + /** Default: 0 */ + queryViewsLogRetentionSize?: number; + queryViewsLogRetentionTime?: number; + /** + * Default: false + * Min version: 20.11 + */ + asynchronousMetricLogEnabled?: boolean; + /** Default: 0 */ + asynchronousMetricLogRetentionSize?: number; + asynchronousMetricLogRetentionTime?: number; + /** + * Default: 0 + * Min version: 20.11 + */ + opentelemetrySpanLogRetentionSize?: number; + opentelemetrySpanLogRetentionTime?: number; + /** + * Default: false + * Min version: 21.11 + */ + sessionLogEnabled?: boolean; + /** Default: 0 */ + sessionLogRetentionSize?: number; + sessionLogRetentionTime?: number; + /** + * Default: false + * Min version: 21.9 + */ + zookeeperLogEnabled?: boolean; + /** Default: 0 */ + zookeeperLogRetentionSize?: number; + zookeeperLogRetentionTime?: number; + /** + * Default: false + * Min version: 22.10 + */ + asynchronousInsertLogEnabled?: boolean; + /** Default: 0 */ + asynchronousInsertLogRetentionSize?: number; + asynchronousInsertLogRetentionTime?: number; + geobaseEnabled?: boolean; } export enum ClickhouseConfig_LogLevel { @@ -211,6 +298,8 @@ export interface ClickhouseConfig_MergeTree { partsToDelayInsert?: number; /** If more than this number active parts in single partition, throw 'Too many parts ...' exception. 
*/ partsToThrowInsert?: number; + inactivePartsToDelayInsert?: number; + inactivePartsToThrowInsert?: number; /** How many tasks of merging and mutating parts are allowed simultaneously in ReplicatedMergeTree queue. */ maxReplicatedMergesInQueue?: number; /** @@ -224,6 +313,59 @@ export interface ClickhouseConfig_MergeTree { */ maxBytesToMergeAtMinSpaceInPool?: number; maxBytesToMergeAtMaxSpaceInPool?: number; + /** + * Minimum number of bytes in a data part that can be stored in **Wide** format. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_bytes_for_wide_part). + */ + minBytesForWidePart?: number; + /** + * Minimum number of rows in a data part that can be stored in **Wide** format. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_bytes_for_wide_part). + */ + minRowsForWidePart?: number; + /** + * Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#ttl_only_drop_parts). + */ + ttlOnlyDropParts?: boolean; + allowRemoteFsZeroCopyReplication?: boolean; + mergeWithTtlTimeout?: number; + mergeWithRecompressionTtlTimeout?: number; + maxPartsInTotal?: number; + maxNumberOfMergesWithTtlInPool?: number; + cleanupDelayPeriod?: number; + numberOfFreeEntriesInPoolToExecuteMutation?: number; + /** + * The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. 
This does not affect the thresholds on inactive parts or total parts. + * Default: 1 GiB + * Min version: 22.10 + * See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/f9558345e886876b9132d9c018e357f7fa9b22a3/src/Storages/MergeTree/MergeTreeSettings.h#L80) + */ + maxAvgPartSizeForTooManyParts?: number; + /** + * Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds. + * Default: 0 - disabled + * Min_version: 22.10 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds) + */ + minAgeToForceMergeSeconds?: number; + /** + * Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset. + * Default: false + * Min_version: 22.11 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds) + */ + minAgeToForceMergeOnPartitionOnly?: boolean; + /** + * Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters. 
+ * Default: 5000 + * Min_version: 21.10 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#merge_selecting_sleep_ms) + */ + mergeSelectingSleepMs?: number; } export interface ClickhouseConfig_Kafka { @@ -232,6 +374,9 @@ export interface ClickhouseConfig_Kafka { saslMechanism: ClickhouseConfig_Kafka_SaslMechanism; saslUsername: string; saslPassword: string; + enableSslCertificateVerification?: boolean; + maxPollIntervalMs?: number; + sessionTimeoutMs?: number; } export enum ClickhouseConfig_Kafka_SecurityProtocol { @@ -350,18 +495,23 @@ export interface ClickhouseConfig_KafkaTopic { export interface ClickhouseConfig_Rabbitmq { $type: "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.Rabbitmq"; + /** [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) username */ username: string; + /** [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) password */ password: string; + /** [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) virtual host */ + vhost: string; } export interface ClickhouseConfig_Compression { $type: "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.Compression"; - /** Compression method to use for the specified combination of `min_part_size` and `min_part_size_ratio`. */ + /** Compression method to use for the specified combination of [min_part_size] and [min_part_size_ratio]. */ method: ClickhouseConfig_Compression_Method; /** Minimum size of a part of a table. */ minPartSize: number; /** Minimum ratio of a part relative to the size of all the data in the table. */ minPartSizeRatio: number; + level?: number; } export enum ClickhouseConfig_Compression_Method { @@ -525,6 +675,7 @@ export interface ClickhouseConfig_ExternalDictionary_MongodbSource { user: string; /** Password of the MongoDB database user. 
*/ password: string; + options: string; } export interface ClickhouseConfig_ExternalDictionary_PostgresqlSource { @@ -1104,6 +1255,15 @@ export const ClickhouseConfig = { if (message.textLogLevel !== 0) { writer.uint32(256).int32(message.textLogLevel); } + if (message.opentelemetrySpanLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.opentelemetrySpanLogEnabled!, + }, + writer.uint32(338).fork() + ).ldelim(); + } if (message.backgroundPoolSize !== undefined) { Int64Value.encode( { @@ -1122,6 +1282,255 @@ export const ClickhouseConfig = { writer.uint32(274).fork() ).ldelim(); } + if (message.backgroundFetchesPoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundFetchesPoolSize!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.backgroundMovePoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundMovePoolSize!, + }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backgroundDistributedSchedulePoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundDistributedSchedulePoolSize!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.backgroundBufferFlushSchedulePoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundBufferFlushSchedulePoolSize!, + }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.backgroundMessageBrokerSchedulePoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundMessageBrokerSchedulePoolSize!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.defaultDatabase !== undefined) { + StringValue.encode( + { + $type: "google.protobuf.StringValue", + value: message.defaultDatabase!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if 
(message.totalMemoryProfilerStep !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.totalMemoryProfilerStep!, + }, + writer.uint32(354).fork() + ).ldelim(); + } + if (message.totalMemoryTrackerSampleProbability !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.totalMemoryTrackerSampleProbability!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.backgroundCommonPoolSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundCommonPoolSize!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.backgroundMergesMutationsConcurrencyRatio !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backgroundMergesMutationsConcurrencyRatio!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.queryViewsLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.queryViewsLogEnabled!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.queryViewsLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.queryViewsLogRetentionSize!, + }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.queryViewsLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.queryViewsLogRetentionTime!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.asynchronousMetricLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.asynchronousMetricLogEnabled!, + }, + writer.uint32(418).fork() + ).ldelim(); + } + if (message.asynchronousMetricLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asynchronousMetricLogRetentionSize!, + }, + writer.uint32(426).fork() + 
).ldelim(); + } + if (message.asynchronousMetricLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asynchronousMetricLogRetentionTime!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.opentelemetrySpanLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.opentelemetrySpanLogRetentionSize!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.opentelemetrySpanLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.opentelemetrySpanLogRetentionTime!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.sessionLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.sessionLogEnabled!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.sessionLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionLogRetentionSize!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.sessionLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionLogRetentionTime!, + }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.zookeeperLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.zookeeperLogEnabled!, + }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.zookeeperLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.zookeeperLogRetentionSize!, + }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.zookeeperLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.zookeeperLogRetentionTime!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if 
(message.asynchronousInsertLogEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.asynchronousInsertLogEnabled!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.asynchronousInsertLogRetentionSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asynchronousInsertLogRetentionSize!, + }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.asynchronousInsertLogRetentionTime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asynchronousInsertLogRetentionTime!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.geobaseEnabled !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geobaseEnabled! }, + writer.uint32(530).fork() + ).ldelim(); + } return writer; }, @@ -1330,6 +1739,12 @@ export const ClickhouseConfig = { case 32: message.textLogLevel = reader.int32() as any; break; + case 42: + message.opentelemetrySpanLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; case 33: message.backgroundPoolSize = Int64Value.decode( reader, @@ -1342,6 +1757,174 @@ export const ClickhouseConfig = { reader.uint32() ).value; break; + case 38: + message.backgroundFetchesPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.backgroundMovePoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.backgroundDistributedSchedulePoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.backgroundBufferFlushSchedulePoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.backgroundMessageBrokerSchedulePoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.defaultDatabase = StringValue.decode( + reader, + reader.uint32() + ).value; + 
break; + case 44: + message.totalMemoryProfilerStep = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.totalMemoryTrackerSampleProbability = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.backgroundCommonPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.backgroundMergesMutationsConcurrencyRatio = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.queryViewsLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.queryViewsLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.queryViewsLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.asynchronousMetricLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 53: + message.asynchronousMetricLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.asynchronousMetricLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.opentelemetrySpanLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.opentelemetrySpanLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.sessionLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.sessionLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.sessionLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.zookeeperLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.zookeeperLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + 
).value; + break; + case 62: + message.zookeeperLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.asynchronousInsertLogEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.asynchronousInsertLogRetentionSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.asynchronousInsertLogRetentionTime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.geobaseEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1506,6 +2089,11 @@ export const ClickhouseConfig = { object.textLogLevel !== undefined && object.textLogLevel !== null ? clickhouseConfig_LogLevelFromJSON(object.textLogLevel) : 0; + message.opentelemetrySpanLogEnabled = + object.opentelemetrySpanLogEnabled !== undefined && + object.opentelemetrySpanLogEnabled !== null + ? Boolean(object.opentelemetrySpanLogEnabled) + : undefined; message.backgroundPoolSize = object.backgroundPoolSize !== undefined && object.backgroundPoolSize !== null @@ -1516,6 +2104,144 @@ export const ClickhouseConfig = { object.backgroundSchedulePoolSize !== null ? Number(object.backgroundSchedulePoolSize) : undefined; + message.backgroundFetchesPoolSize = + object.backgroundFetchesPoolSize !== undefined && + object.backgroundFetchesPoolSize !== null + ? Number(object.backgroundFetchesPoolSize) + : undefined; + message.backgroundMovePoolSize = + object.backgroundMovePoolSize !== undefined && + object.backgroundMovePoolSize !== null + ? Number(object.backgroundMovePoolSize) + : undefined; + message.backgroundDistributedSchedulePoolSize = + object.backgroundDistributedSchedulePoolSize !== undefined && + object.backgroundDistributedSchedulePoolSize !== null + ? 
Number(object.backgroundDistributedSchedulePoolSize) + : undefined; + message.backgroundBufferFlushSchedulePoolSize = + object.backgroundBufferFlushSchedulePoolSize !== undefined && + object.backgroundBufferFlushSchedulePoolSize !== null + ? Number(object.backgroundBufferFlushSchedulePoolSize) + : undefined; + message.backgroundMessageBrokerSchedulePoolSize = + object.backgroundMessageBrokerSchedulePoolSize !== undefined && + object.backgroundMessageBrokerSchedulePoolSize !== null + ? Number(object.backgroundMessageBrokerSchedulePoolSize) + : undefined; + message.defaultDatabase = + object.defaultDatabase !== undefined && object.defaultDatabase !== null + ? String(object.defaultDatabase) + : undefined; + message.totalMemoryProfilerStep = + object.totalMemoryProfilerStep !== undefined && + object.totalMemoryProfilerStep !== null + ? Number(object.totalMemoryProfilerStep) + : undefined; + message.totalMemoryTrackerSampleProbability = + object.totalMemoryTrackerSampleProbability !== undefined && + object.totalMemoryTrackerSampleProbability !== null + ? Number(object.totalMemoryTrackerSampleProbability) + : undefined; + message.backgroundCommonPoolSize = + object.backgroundCommonPoolSize !== undefined && + object.backgroundCommonPoolSize !== null + ? Number(object.backgroundCommonPoolSize) + : undefined; + message.backgroundMergesMutationsConcurrencyRatio = + object.backgroundMergesMutationsConcurrencyRatio !== undefined && + object.backgroundMergesMutationsConcurrencyRatio !== null + ? Number(object.backgroundMergesMutationsConcurrencyRatio) + : undefined; + message.queryViewsLogEnabled = + object.queryViewsLogEnabled !== undefined && + object.queryViewsLogEnabled !== null + ? Boolean(object.queryViewsLogEnabled) + : undefined; + message.queryViewsLogRetentionSize = + object.queryViewsLogRetentionSize !== undefined && + object.queryViewsLogRetentionSize !== null + ? 
Number(object.queryViewsLogRetentionSize) + : undefined; + message.queryViewsLogRetentionTime = + object.queryViewsLogRetentionTime !== undefined && + object.queryViewsLogRetentionTime !== null + ? Number(object.queryViewsLogRetentionTime) + : undefined; + message.asynchronousMetricLogEnabled = + object.asynchronousMetricLogEnabled !== undefined && + object.asynchronousMetricLogEnabled !== null + ? Boolean(object.asynchronousMetricLogEnabled) + : undefined; + message.asynchronousMetricLogRetentionSize = + object.asynchronousMetricLogRetentionSize !== undefined && + object.asynchronousMetricLogRetentionSize !== null + ? Number(object.asynchronousMetricLogRetentionSize) + : undefined; + message.asynchronousMetricLogRetentionTime = + object.asynchronousMetricLogRetentionTime !== undefined && + object.asynchronousMetricLogRetentionTime !== null + ? Number(object.asynchronousMetricLogRetentionTime) + : undefined; + message.opentelemetrySpanLogRetentionSize = + object.opentelemetrySpanLogRetentionSize !== undefined && + object.opentelemetrySpanLogRetentionSize !== null + ? Number(object.opentelemetrySpanLogRetentionSize) + : undefined; + message.opentelemetrySpanLogRetentionTime = + object.opentelemetrySpanLogRetentionTime !== undefined && + object.opentelemetrySpanLogRetentionTime !== null + ? Number(object.opentelemetrySpanLogRetentionTime) + : undefined; + message.sessionLogEnabled = + object.sessionLogEnabled !== undefined && + object.sessionLogEnabled !== null + ? Boolean(object.sessionLogEnabled) + : undefined; + message.sessionLogRetentionSize = + object.sessionLogRetentionSize !== undefined && + object.sessionLogRetentionSize !== null + ? Number(object.sessionLogRetentionSize) + : undefined; + message.sessionLogRetentionTime = + object.sessionLogRetentionTime !== undefined && + object.sessionLogRetentionTime !== null + ? 
Number(object.sessionLogRetentionTime) + : undefined; + message.zookeeperLogEnabled = + object.zookeeperLogEnabled !== undefined && + object.zookeeperLogEnabled !== null + ? Boolean(object.zookeeperLogEnabled) + : undefined; + message.zookeeperLogRetentionSize = + object.zookeeperLogRetentionSize !== undefined && + object.zookeeperLogRetentionSize !== null + ? Number(object.zookeeperLogRetentionSize) + : undefined; + message.zookeeperLogRetentionTime = + object.zookeeperLogRetentionTime !== undefined && + object.zookeeperLogRetentionTime !== null + ? Number(object.zookeeperLogRetentionTime) + : undefined; + message.asynchronousInsertLogEnabled = + object.asynchronousInsertLogEnabled !== undefined && + object.asynchronousInsertLogEnabled !== null + ? Boolean(object.asynchronousInsertLogEnabled) + : undefined; + message.asynchronousInsertLogRetentionSize = + object.asynchronousInsertLogRetentionSize !== undefined && + object.asynchronousInsertLogRetentionSize !== null + ? Number(object.asynchronousInsertLogRetentionSize) + : undefined; + message.asynchronousInsertLogRetentionTime = + object.asynchronousInsertLogRetentionTime !== undefined && + object.asynchronousInsertLogRetentionTime !== null + ? Number(object.asynchronousInsertLogRetentionTime) + : undefined; + message.geobaseEnabled = + object.geobaseEnabled !== undefined && object.geobaseEnabled !== null + ? 
Boolean(object.geobaseEnabled) + : undefined; return message; }, @@ -1618,10 +2344,79 @@ export const ClickhouseConfig = { (obj.textLogLevel = clickhouseConfig_LogLevelToJSON( message.textLogLevel )); + message.opentelemetrySpanLogEnabled !== undefined && + (obj.opentelemetrySpanLogEnabled = message.opentelemetrySpanLogEnabled); message.backgroundPoolSize !== undefined && (obj.backgroundPoolSize = message.backgroundPoolSize); message.backgroundSchedulePoolSize !== undefined && (obj.backgroundSchedulePoolSize = message.backgroundSchedulePoolSize); + message.backgroundFetchesPoolSize !== undefined && + (obj.backgroundFetchesPoolSize = message.backgroundFetchesPoolSize); + message.backgroundMovePoolSize !== undefined && + (obj.backgroundMovePoolSize = message.backgroundMovePoolSize); + message.backgroundDistributedSchedulePoolSize !== undefined && + (obj.backgroundDistributedSchedulePoolSize = + message.backgroundDistributedSchedulePoolSize); + message.backgroundBufferFlushSchedulePoolSize !== undefined && + (obj.backgroundBufferFlushSchedulePoolSize = + message.backgroundBufferFlushSchedulePoolSize); + message.backgroundMessageBrokerSchedulePoolSize !== undefined && + (obj.backgroundMessageBrokerSchedulePoolSize = + message.backgroundMessageBrokerSchedulePoolSize); + message.defaultDatabase !== undefined && + (obj.defaultDatabase = message.defaultDatabase); + message.totalMemoryProfilerStep !== undefined && + (obj.totalMemoryProfilerStep = message.totalMemoryProfilerStep); + message.totalMemoryTrackerSampleProbability !== undefined && + (obj.totalMemoryTrackerSampleProbability = + message.totalMemoryTrackerSampleProbability); + message.backgroundCommonPoolSize !== undefined && + (obj.backgroundCommonPoolSize = message.backgroundCommonPoolSize); + message.backgroundMergesMutationsConcurrencyRatio !== undefined && + (obj.backgroundMergesMutationsConcurrencyRatio = + message.backgroundMergesMutationsConcurrencyRatio); + message.queryViewsLogEnabled !== undefined && + 
(obj.queryViewsLogEnabled = message.queryViewsLogEnabled); + message.queryViewsLogRetentionSize !== undefined && + (obj.queryViewsLogRetentionSize = message.queryViewsLogRetentionSize); + message.queryViewsLogRetentionTime !== undefined && + (obj.queryViewsLogRetentionTime = message.queryViewsLogRetentionTime); + message.asynchronousMetricLogEnabled !== undefined && + (obj.asynchronousMetricLogEnabled = message.asynchronousMetricLogEnabled); + message.asynchronousMetricLogRetentionSize !== undefined && + (obj.asynchronousMetricLogRetentionSize = + message.asynchronousMetricLogRetentionSize); + message.asynchronousMetricLogRetentionTime !== undefined && + (obj.asynchronousMetricLogRetentionTime = + message.asynchronousMetricLogRetentionTime); + message.opentelemetrySpanLogRetentionSize !== undefined && + (obj.opentelemetrySpanLogRetentionSize = + message.opentelemetrySpanLogRetentionSize); + message.opentelemetrySpanLogRetentionTime !== undefined && + (obj.opentelemetrySpanLogRetentionTime = + message.opentelemetrySpanLogRetentionTime); + message.sessionLogEnabled !== undefined && + (obj.sessionLogEnabled = message.sessionLogEnabled); + message.sessionLogRetentionSize !== undefined && + (obj.sessionLogRetentionSize = message.sessionLogRetentionSize); + message.sessionLogRetentionTime !== undefined && + (obj.sessionLogRetentionTime = message.sessionLogRetentionTime); + message.zookeeperLogEnabled !== undefined && + (obj.zookeeperLogEnabled = message.zookeeperLogEnabled); + message.zookeeperLogRetentionSize !== undefined && + (obj.zookeeperLogRetentionSize = message.zookeeperLogRetentionSize); + message.zookeeperLogRetentionTime !== undefined && + (obj.zookeeperLogRetentionTime = message.zookeeperLogRetentionTime); + message.asynchronousInsertLogEnabled !== undefined && + (obj.asynchronousInsertLogEnabled = message.asynchronousInsertLogEnabled); + message.asynchronousInsertLogRetentionSize !== undefined && + (obj.asynchronousInsertLogRetentionSize = + 
message.asynchronousInsertLogRetentionSize); + message.asynchronousInsertLogRetentionTime !== undefined && + (obj.asynchronousInsertLogRetentionTime = + message.asynchronousInsertLogRetentionTime); + message.geobaseEnabled !== undefined && + (obj.geobaseEnabled = message.geobaseEnabled); return obj; }, @@ -1688,9 +2483,61 @@ export const ClickhouseConfig = { message.textLogRetentionSize = object.textLogRetentionSize ?? undefined; message.textLogRetentionTime = object.textLogRetentionTime ?? undefined; message.textLogLevel = object.textLogLevel ?? 0; + message.opentelemetrySpanLogEnabled = + object.opentelemetrySpanLogEnabled ?? undefined; message.backgroundPoolSize = object.backgroundPoolSize ?? undefined; message.backgroundSchedulePoolSize = object.backgroundSchedulePoolSize ?? undefined; + message.backgroundFetchesPoolSize = + object.backgroundFetchesPoolSize ?? undefined; + message.backgroundMovePoolSize = object.backgroundMovePoolSize ?? undefined; + message.backgroundDistributedSchedulePoolSize = + object.backgroundDistributedSchedulePoolSize ?? undefined; + message.backgroundBufferFlushSchedulePoolSize = + object.backgroundBufferFlushSchedulePoolSize ?? undefined; + message.backgroundMessageBrokerSchedulePoolSize = + object.backgroundMessageBrokerSchedulePoolSize ?? undefined; + message.defaultDatabase = object.defaultDatabase ?? undefined; + message.totalMemoryProfilerStep = + object.totalMemoryProfilerStep ?? undefined; + message.totalMemoryTrackerSampleProbability = + object.totalMemoryTrackerSampleProbability ?? undefined; + message.backgroundCommonPoolSize = + object.backgroundCommonPoolSize ?? undefined; + message.backgroundMergesMutationsConcurrencyRatio = + object.backgroundMergesMutationsConcurrencyRatio ?? undefined; + message.queryViewsLogEnabled = object.queryViewsLogEnabled ?? undefined; + message.queryViewsLogRetentionSize = + object.queryViewsLogRetentionSize ?? 
undefined; + message.queryViewsLogRetentionTime = + object.queryViewsLogRetentionTime ?? undefined; + message.asynchronousMetricLogEnabled = + object.asynchronousMetricLogEnabled ?? undefined; + message.asynchronousMetricLogRetentionSize = + object.asynchronousMetricLogRetentionSize ?? undefined; + message.asynchronousMetricLogRetentionTime = + object.asynchronousMetricLogRetentionTime ?? undefined; + message.opentelemetrySpanLogRetentionSize = + object.opentelemetrySpanLogRetentionSize ?? undefined; + message.opentelemetrySpanLogRetentionTime = + object.opentelemetrySpanLogRetentionTime ?? undefined; + message.sessionLogEnabled = object.sessionLogEnabled ?? undefined; + message.sessionLogRetentionSize = + object.sessionLogRetentionSize ?? undefined; + message.sessionLogRetentionTime = + object.sessionLogRetentionTime ?? undefined; + message.zookeeperLogEnabled = object.zookeeperLogEnabled ?? undefined; + message.zookeeperLogRetentionSize = + object.zookeeperLogRetentionSize ?? undefined; + message.zookeeperLogRetentionTime = + object.zookeeperLogRetentionTime ?? undefined; + message.asynchronousInsertLogEnabled = + object.asynchronousInsertLogEnabled ?? undefined; + message.asynchronousInsertLogRetentionSize = + object.asynchronousInsertLogRetentionSize ?? undefined; + message.asynchronousInsertLogRetentionTime = + object.asynchronousInsertLogRetentionTime ?? undefined; + message.geobaseEnabled = object.geobaseEnabled ?? 
undefined; return message; }, }; @@ -1722,27 +2569,45 @@ export const ClickhouseConfig_MergeTree = { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.replicatedDeduplicationWindowSeconds!, + value: message.replicatedDeduplicationWindowSeconds!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.partsToDelayInsert !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.partsToDelayInsert!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.partsToThrowInsert !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.partsToThrowInsert!, }, - writer.uint32(18).fork() + writer.uint32(34).fork() ).ldelim(); } - if (message.partsToDelayInsert !== undefined) { + if (message.inactivePartsToDelayInsert !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.partsToDelayInsert!, + value: message.inactivePartsToDelayInsert!, }, - writer.uint32(26).fork() + writer.uint32(74).fork() ).ldelim(); } - if (message.partsToThrowInsert !== undefined) { + if (message.inactivePartsToThrowInsert !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.partsToThrowInsert!, + value: message.inactivePartsToThrowInsert!, }, - writer.uint32(34).fork() + writer.uint32(82).fork() ).ldelim(); } if (message.maxReplicatedMergesInQueue !== undefined) { @@ -1781,6 +2646,132 @@ export const ClickhouseConfig_MergeTree = { writer.uint32(66).fork() ).ldelim(); } + if (message.minBytesForWidePart !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minBytesForWidePart!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.minRowsForWidePart !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minRowsForWidePart!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.ttlOnlyDropParts !== 
undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.ttlOnlyDropParts!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.allowRemoteFsZeroCopyReplication !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.allowRemoteFsZeroCopyReplication!, + }, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.mergeWithTtlTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mergeWithTtlTimeout!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.mergeWithRecompressionTtlTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mergeWithRecompressionTtlTimeout!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.maxPartsInTotal !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPartsInTotal!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.maxNumberOfMergesWithTtlInPool !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxNumberOfMergesWithTtlInPool!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.cleanupDelayPeriod !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.cleanupDelayPeriod!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.numberOfFreeEntriesInPoolToExecuteMutation !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.numberOfFreeEntriesInPoolToExecuteMutation!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.maxAvgPartSizeForTooManyParts !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxAvgPartSizeForTooManyParts!, + }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.minAgeToForceMergeSeconds !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minAgeToForceMergeSeconds!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.minAgeToForceMergeOnPartitionOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.minAgeToForceMergeOnPartitionOnly!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.mergeSelectingSleepMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mergeSelectingSleepMs!, + }, + writer.uint32(194).fork() + ).ldelim(); + } return writer; }, @@ -1820,6 +2811,18 @@ export const ClickhouseConfig_MergeTree = { reader.uint32() ).value; break; + case 9: + message.inactivePartsToDelayInsert = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.inactivePartsToThrowInsert = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; case 5: message.maxReplicatedMergesInQueue = Int64Value.decode( reader, @@ -1842,6 +2845,88 @@ export const ClickhouseConfig_MergeTree = { reader.uint32() ).value; break; + case 11: + message.minBytesForWidePart = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.minRowsForWidePart = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.ttlOnlyDropParts = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.allowRemoteFsZeroCopyReplication = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.mergeWithTtlTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.mergeWithRecompressionTtlTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.maxPartsInTotal = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.maxNumberOfMergesWithTtlInPool = Int64Value.decode( + reader, + 
reader.uint32() + ).value; + break; + case 19: + message.cleanupDelayPeriod = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.numberOfFreeEntriesInPoolToExecuteMutation = + Int64Value.decode(reader, reader.uint32()).value; + break; + case 21: + message.maxAvgPartSizeForTooManyParts = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 22: + message.minAgeToForceMergeSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.minAgeToForceMergeOnPartitionOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.mergeSelectingSleepMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1874,6 +2959,16 @@ export const ClickhouseConfig_MergeTree = { object.partsToThrowInsert !== null ? Number(object.partsToThrowInsert) : undefined; + message.inactivePartsToDelayInsert = + object.inactivePartsToDelayInsert !== undefined && + object.inactivePartsToDelayInsert !== null + ? Number(object.inactivePartsToDelayInsert) + : undefined; + message.inactivePartsToThrowInsert = + object.inactivePartsToThrowInsert !== undefined && + object.inactivePartsToThrowInsert !== null + ? Number(object.inactivePartsToThrowInsert) + : undefined; message.maxReplicatedMergesInQueue = object.maxReplicatedMergesInQueue !== undefined && object.maxReplicatedMergesInQueue !== null @@ -1894,6 +2989,74 @@ export const ClickhouseConfig_MergeTree = { object.maxBytesToMergeAtMaxSpaceInPool !== null ? Number(object.maxBytesToMergeAtMaxSpaceInPool) : undefined; + message.minBytesForWidePart = + object.minBytesForWidePart !== undefined && + object.minBytesForWidePart !== null + ? Number(object.minBytesForWidePart) + : undefined; + message.minRowsForWidePart = + object.minRowsForWidePart !== undefined && + object.minRowsForWidePart !== null + ? 
Number(object.minRowsForWidePart) + : undefined; + message.ttlOnlyDropParts = + object.ttlOnlyDropParts !== undefined && object.ttlOnlyDropParts !== null + ? Boolean(object.ttlOnlyDropParts) + : undefined; + message.allowRemoteFsZeroCopyReplication = + object.allowRemoteFsZeroCopyReplication !== undefined && + object.allowRemoteFsZeroCopyReplication !== null + ? Boolean(object.allowRemoteFsZeroCopyReplication) + : undefined; + message.mergeWithTtlTimeout = + object.mergeWithTtlTimeout !== undefined && + object.mergeWithTtlTimeout !== null + ? Number(object.mergeWithTtlTimeout) + : undefined; + message.mergeWithRecompressionTtlTimeout = + object.mergeWithRecompressionTtlTimeout !== undefined && + object.mergeWithRecompressionTtlTimeout !== null + ? Number(object.mergeWithRecompressionTtlTimeout) + : undefined; + message.maxPartsInTotal = + object.maxPartsInTotal !== undefined && object.maxPartsInTotal !== null + ? Number(object.maxPartsInTotal) + : undefined; + message.maxNumberOfMergesWithTtlInPool = + object.maxNumberOfMergesWithTtlInPool !== undefined && + object.maxNumberOfMergesWithTtlInPool !== null + ? Number(object.maxNumberOfMergesWithTtlInPool) + : undefined; + message.cleanupDelayPeriod = + object.cleanupDelayPeriod !== undefined && + object.cleanupDelayPeriod !== null + ? Number(object.cleanupDelayPeriod) + : undefined; + message.numberOfFreeEntriesInPoolToExecuteMutation = + object.numberOfFreeEntriesInPoolToExecuteMutation !== undefined && + object.numberOfFreeEntriesInPoolToExecuteMutation !== null + ? Number(object.numberOfFreeEntriesInPoolToExecuteMutation) + : undefined; + message.maxAvgPartSizeForTooManyParts = + object.maxAvgPartSizeForTooManyParts !== undefined && + object.maxAvgPartSizeForTooManyParts !== null + ? Number(object.maxAvgPartSizeForTooManyParts) + : undefined; + message.minAgeToForceMergeSeconds = + object.minAgeToForceMergeSeconds !== undefined && + object.minAgeToForceMergeSeconds !== null + ? 
Number(object.minAgeToForceMergeSeconds) + : undefined; + message.minAgeToForceMergeOnPartitionOnly = + object.minAgeToForceMergeOnPartitionOnly !== undefined && + object.minAgeToForceMergeOnPartitionOnly !== null + ? Boolean(object.minAgeToForceMergeOnPartitionOnly) + : undefined; + message.mergeSelectingSleepMs = + object.mergeSelectingSleepMs !== undefined && + object.mergeSelectingSleepMs !== null + ? Number(object.mergeSelectingSleepMs) + : undefined; return message; }, @@ -1909,6 +3072,10 @@ export const ClickhouseConfig_MergeTree = { (obj.partsToDelayInsert = message.partsToDelayInsert); message.partsToThrowInsert !== undefined && (obj.partsToThrowInsert = message.partsToThrowInsert); + message.inactivePartsToDelayInsert !== undefined && + (obj.inactivePartsToDelayInsert = message.inactivePartsToDelayInsert); + message.inactivePartsToThrowInsert !== undefined && + (obj.inactivePartsToThrowInsert = message.inactivePartsToThrowInsert); message.maxReplicatedMergesInQueue !== undefined && (obj.maxReplicatedMergesInQueue = message.maxReplicatedMergesInQueue); message.numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge !== undefined && @@ -1920,6 +3087,40 @@ export const ClickhouseConfig_MergeTree = { message.maxBytesToMergeAtMaxSpaceInPool !== undefined && (obj.maxBytesToMergeAtMaxSpaceInPool = message.maxBytesToMergeAtMaxSpaceInPool); + message.minBytesForWidePart !== undefined && + (obj.minBytesForWidePart = message.minBytesForWidePart); + message.minRowsForWidePart !== undefined && + (obj.minRowsForWidePart = message.minRowsForWidePart); + message.ttlOnlyDropParts !== undefined && + (obj.ttlOnlyDropParts = message.ttlOnlyDropParts); + message.allowRemoteFsZeroCopyReplication !== undefined && + (obj.allowRemoteFsZeroCopyReplication = + message.allowRemoteFsZeroCopyReplication); + message.mergeWithTtlTimeout !== undefined && + (obj.mergeWithTtlTimeout = message.mergeWithTtlTimeout); + message.mergeWithRecompressionTtlTimeout !== undefined && + 
(obj.mergeWithRecompressionTtlTimeout = + message.mergeWithRecompressionTtlTimeout); + message.maxPartsInTotal !== undefined && + (obj.maxPartsInTotal = message.maxPartsInTotal); + message.maxNumberOfMergesWithTtlInPool !== undefined && + (obj.maxNumberOfMergesWithTtlInPool = + message.maxNumberOfMergesWithTtlInPool); + message.cleanupDelayPeriod !== undefined && + (obj.cleanupDelayPeriod = message.cleanupDelayPeriod); + message.numberOfFreeEntriesInPoolToExecuteMutation !== undefined && + (obj.numberOfFreeEntriesInPoolToExecuteMutation = + message.numberOfFreeEntriesInPoolToExecuteMutation); + message.maxAvgPartSizeForTooManyParts !== undefined && + (obj.maxAvgPartSizeForTooManyParts = + message.maxAvgPartSizeForTooManyParts); + message.minAgeToForceMergeSeconds !== undefined && + (obj.minAgeToForceMergeSeconds = message.minAgeToForceMergeSeconds); + message.minAgeToForceMergeOnPartitionOnly !== undefined && + (obj.minAgeToForceMergeOnPartitionOnly = + message.minAgeToForceMergeOnPartitionOnly); + message.mergeSelectingSleepMs !== undefined && + (obj.mergeSelectingSleepMs = message.mergeSelectingSleepMs); return obj; }, @@ -1935,6 +3136,10 @@ export const ClickhouseConfig_MergeTree = { object.replicatedDeduplicationWindowSeconds ?? undefined; message.partsToDelayInsert = object.partsToDelayInsert ?? undefined; message.partsToThrowInsert = object.partsToThrowInsert ?? undefined; + message.inactivePartsToDelayInsert = + object.inactivePartsToDelayInsert ?? undefined; + message.inactivePartsToThrowInsert = + object.inactivePartsToThrowInsert ?? undefined; message.maxReplicatedMergesInQueue = object.maxReplicatedMergesInQueue ?? undefined; message.numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = @@ -1943,6 +3148,27 @@ export const ClickhouseConfig_MergeTree = { object.maxBytesToMergeAtMinSpaceInPool ?? undefined; message.maxBytesToMergeAtMaxSpaceInPool = object.maxBytesToMergeAtMaxSpaceInPool ?? undefined; + message.minBytesForWidePart = object.minBytesForWidePart ?? 
undefined; + message.minRowsForWidePart = object.minRowsForWidePart ?? undefined; + message.ttlOnlyDropParts = object.ttlOnlyDropParts ?? undefined; + message.allowRemoteFsZeroCopyReplication = + object.allowRemoteFsZeroCopyReplication ?? undefined; + message.mergeWithTtlTimeout = object.mergeWithTtlTimeout ?? undefined; + message.mergeWithRecompressionTtlTimeout = + object.mergeWithRecompressionTtlTimeout ?? undefined; + message.maxPartsInTotal = object.maxPartsInTotal ?? undefined; + message.maxNumberOfMergesWithTtlInPool = + object.maxNumberOfMergesWithTtlInPool ?? undefined; + message.cleanupDelayPeriod = object.cleanupDelayPeriod ?? undefined; + message.numberOfFreeEntriesInPoolToExecuteMutation = + object.numberOfFreeEntriesInPoolToExecuteMutation ?? undefined; + message.maxAvgPartSizeForTooManyParts = + object.maxAvgPartSizeForTooManyParts ?? undefined; + message.minAgeToForceMergeSeconds = + object.minAgeToForceMergeSeconds ?? undefined; + message.minAgeToForceMergeOnPartitionOnly = + object.minAgeToForceMergeOnPartitionOnly ?? undefined; + message.mergeSelectingSleepMs = object.mergeSelectingSleepMs ?? 
undefined; return message; }, }; @@ -1980,6 +3206,33 @@ export const ClickhouseConfig_Kafka = { if (message.saslPassword !== "") { writer.uint32(34).string(message.saslPassword); } + if (message.enableSslCertificateVerification !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableSslCertificateVerification!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPollIntervalMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPollIntervalMs!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.sessionTimeoutMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionTimeoutMs!, + }, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -2005,6 +3258,24 @@ export const ClickhouseConfig_Kafka = { case 4: message.saslPassword = reader.string(); break; + case 5: + message.enableSslCertificateVerification = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPollIntervalMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.sessionTimeoutMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2033,6 +3304,20 @@ export const ClickhouseConfig_Kafka = { object.saslPassword !== undefined && object.saslPassword !== null ? String(object.saslPassword) : ""; + message.enableSslCertificateVerification = + object.enableSslCertificateVerification !== undefined && + object.enableSslCertificateVerification !== null + ? Boolean(object.enableSslCertificateVerification) + : undefined; + message.maxPollIntervalMs = + object.maxPollIntervalMs !== undefined && + object.maxPollIntervalMs !== null + ? Number(object.maxPollIntervalMs) + : undefined; + message.sessionTimeoutMs = + object.sessionTimeoutMs !== undefined && object.sessionTimeoutMs !== null + ? 
Number(object.sessionTimeoutMs) + : undefined; return message; }, @@ -2050,6 +3335,13 @@ export const ClickhouseConfig_Kafka = { (obj.saslUsername = message.saslUsername); message.saslPassword !== undefined && (obj.saslPassword = message.saslPassword); + message.enableSslCertificateVerification !== undefined && + (obj.enableSslCertificateVerification = + message.enableSslCertificateVerification); + message.maxPollIntervalMs !== undefined && + (obj.maxPollIntervalMs = message.maxPollIntervalMs); + message.sessionTimeoutMs !== undefined && + (obj.sessionTimeoutMs = message.sessionTimeoutMs); return obj; }, @@ -2061,6 +3353,10 @@ export const ClickhouseConfig_Kafka = { message.saslMechanism = object.saslMechanism ?? 0; message.saslUsername = object.saslUsername ?? ""; message.saslPassword = object.saslPassword ?? ""; + message.enableSslCertificateVerification = + object.enableSslCertificateVerification ?? undefined; + message.maxPollIntervalMs = object.maxPollIntervalMs ?? undefined; + message.sessionTimeoutMs = object.sessionTimeoutMs ?? undefined; return message; }, }; @@ -2170,6 +3466,7 @@ const baseClickhouseConfig_Rabbitmq: object = { $type: "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.Rabbitmq", username: "", password: "", + vhost: "", }; export const ClickhouseConfig_Rabbitmq = { @@ -2186,6 +3483,9 @@ export const ClickhouseConfig_Rabbitmq = { if (message.password !== "") { writer.uint32(18).string(message.password); } + if (message.vhost !== "") { + writer.uint32(26).string(message.vhost); + } return writer; }, @@ -2207,6 +3507,9 @@ export const ClickhouseConfig_Rabbitmq = { case 2: message.password = reader.string(); break; + case 3: + message.vhost = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -2227,6 +3530,10 @@ export const ClickhouseConfig_Rabbitmq = { object.password !== undefined && object.password !== null ? String(object.password) : ""; + message.vhost = + object.vhost !== undefined && object.vhost !== null + ? 
String(object.vhost) + : ""; return message; }, @@ -2234,6 +3541,7 @@ export const ClickhouseConfig_Rabbitmq = { const obj: any = {}; message.username !== undefined && (obj.username = message.username); message.password !== undefined && (obj.password = message.password); + message.vhost !== undefined && (obj.vhost = message.vhost); return obj; }, @@ -2245,6 +3553,7 @@ export const ClickhouseConfig_Rabbitmq = { } as ClickhouseConfig_Rabbitmq; message.username = object.username ?? ""; message.password = object.password ?? ""; + message.vhost = object.vhost ?? ""; return message; }, }; @@ -2278,6 +3587,12 @@ export const ClickhouseConfig_Compression = { if (message.minPartSizeRatio !== 0) { writer.uint32(25).double(message.minPartSizeRatio); } + if (message.level !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.level! }, + writer.uint32(34).fork() + ).ldelim(); + } return writer; }, @@ -2302,6 +3617,9 @@ export const ClickhouseConfig_Compression = { case 3: message.minPartSizeRatio = reader.double(); break; + case 4: + message.level = Int64Value.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -2326,6 +3644,10 @@ export const ClickhouseConfig_Compression = { object.minPartSizeRatio !== undefined && object.minPartSizeRatio !== null ? Number(object.minPartSizeRatio) : 0; + message.level = + object.level !== undefined && object.level !== null + ? Number(object.level) + : undefined; return message; }, @@ -2337,6 +3659,7 @@ export const ClickhouseConfig_Compression = { (obj.minPartSize = Math.round(message.minPartSize)); message.minPartSizeRatio !== undefined && (obj.minPartSizeRatio = message.minPartSizeRatio); + message.level !== undefined && (obj.level = message.level); return obj; }, @@ -2349,6 +3672,7 @@ export const ClickhouseConfig_Compression = { message.method = object.method ?? 0; message.minPartSize = object.minPartSize ?? 
0; message.minPartSizeRatio = object.minPartSizeRatio ?? 0; + message.level = object.level ?? undefined; return message; }, }; @@ -3267,6 +4591,7 @@ const baseClickhouseConfig_ExternalDictionary_MongodbSource: object = { port: 0, user: "", password: "", + options: "", }; export const ClickhouseConfig_ExternalDictionary_MongodbSource = { @@ -3295,6 +4620,9 @@ export const ClickhouseConfig_ExternalDictionary_MongodbSource = { if (message.password !== "") { writer.uint32(50).string(message.password); } + if (message.options !== "") { + writer.uint32(58).string(message.options); + } return writer; }, @@ -3328,6 +4656,9 @@ export const ClickhouseConfig_ExternalDictionary_MongodbSource = { case 6: message.password = reader.string(); break; + case 7: + message.options = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -3362,6 +4693,10 @@ export const ClickhouseConfig_ExternalDictionary_MongodbSource = { object.password !== undefined && object.password !== null ? String(object.password) : ""; + message.options = + object.options !== undefined && object.options !== null + ? String(object.options) + : ""; return message; }, @@ -3373,6 +4708,7 @@ export const ClickhouseConfig_ExternalDictionary_MongodbSource = { message.port !== undefined && (obj.port = Math.round(message.port)); message.user !== undefined && (obj.user = message.user); message.password !== undefined && (obj.password = message.password); + message.options !== undefined && (obj.options = message.options); return obj; }, @@ -3391,6 +4727,7 @@ export const ClickhouseConfig_ExternalDictionary_MongodbSource = { message.port = object.port ?? 0; message.user = object.user ?? ""; message.password = object.password ?? ""; + message.options = object.options ?? 
""; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts index fce18fd4..81c00bf5 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts @@ -2,7 +2,11 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; -import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; +import { + Int64Value, + BoolValue, + DoubleValue, +} from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.clickhouse.v1"; @@ -66,24 +70,25 @@ export interface UserSettings { */ allowDdl?: boolean; /** - * Enables or disables write quorum for ClickHouse cluster. - * If the value is less than **2**, then write quorum is disabled, otherwise it is enabled. - * - * When used, write quorum guarantees that ClickHouse has written data to the quorum of **insert_quorum** replicas with no errors until the [insert_quorum_timeout] expires. - * All replicas in the quorum are in the consistent state, meaning that they contain linearized data from the previous **INSERT** queries. - * Employ write quorum, if you need the guarantees that the written data would not be lost in case of one or more replicas failure. - * - * You can use [select_sequential_consistency] setting to read the data written with write quorum. + * Enables [introspections functions](https://clickhouse.com/docs/en/sql-reference/functions/introspection) for query profiling. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum). + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-allow_introspection_functions). 
*/ - insertQuorum?: number; + allowIntrospectionFunctions?: boolean; /** * Connection timeout in milliseconds. * * Value must be greater than **0** (default: **10000**, 10 seconds). */ connectTimeout?: number; + /** + * The timeout in milliseconds for connecting to a remote server for a Distributed table engine. Applies only if the cluster uses sharding and replication. If unsuccessful, several attempts are made to connect to various replicas. + * + * Default value: **50**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#connect-timeout-with-failover-ms). + */ + connectTimeoutWithFailover?: number; /** * Receive timeout in milliseconds. * @@ -96,6 +101,25 @@ export interface UserSettings { * Value must be greater than **0** (default: **300000**, 300 seconds or 5 minutes). */ sendTimeout?: number; + /** + * Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less that specified in [min_execution_speed] parameter. + * + * Default value: **10**. + */ + timeoutBeforeCheckingExecutionSpeed?: number; + /** + * Enables or disables write quorum for ClickHouse cluster. + * If the value is less than **2**, then write quorum is disabled, otherwise it is enabled. + * + * When used, write quorum guarantees that ClickHouse has written data to the quorum of **insert_quorum** replicas with no errors until the [insert_quorum_timeout] expires. + * All replicas in the quorum are in the consistent state, meaning that they contain linearized data from the previous **INSERT** queries. + * Employ write quorum, if you need the guarantees that the written data would not be lost in case of one or more replicas failure. + * + * You can use [select_sequential_consistency] setting to read the data written with write quorum. + * + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum). 
+ */ + insertQuorum?: number; /** * Quorum write timeout in milliseconds. * @@ -105,12 +129,34 @@ export interface UserSettings { * Minimum value: **1000**, 1 second (default: **60000**, 1 minute). */ insertQuorumTimeout?: number; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum_parallel). */ + insertQuorumParallel?: boolean; + /** + * Enables the insertion of default values instead of NULL into columns with not nullable data type. + * + * Default value: **true**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#insert_null_as_default). + */ + insertNullAsDefault?: boolean; /** * Determines the behavior of **SELECT** queries from the replicated table: if enabled, ClickHouse will terminate a query with error message in case the replica does not have a chunk written with the quorum and will not read the parts that have not yet been written with the quorum. * * Default value: **false** (sequential consistency is disabled). */ selectSequentialConsistency?: boolean; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-deduplicate-blocks-in-dependent-materialized-views). */ + deduplicateBlocksInDependentMaterializedViews?: boolean; + /** + * Wait mode for asynchronous actions in **ALTER** queries on replicated tables: + * + * * **0**-do not wait for replicas. + * * **1**-only wait for own execution (default). + * * **2**-wait for all replicas. + * + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/alter/#synchronicity-of-alter-queries). + */ + replicationAlterPartitionsSync?: number; /** * Max replica delay in milliseconds. If a replica lags more than the set value, this replica is not used and becomes a stale one. 
* @@ -129,16 +175,6 @@ export interface UserSettings { * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-fallback_to_stale_replicas_for_distributed_queries). */ fallbackToStaleReplicasForDistributedQueries?: boolean; - /** - * Wait mode for asynchronous actions in **ALTER** queries on replicated tables: - * - * * **0**-do not wait for replicas. - * * **1**-only wait for own execution (default). - * * **2**-wait for all replicas. - * - * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/alter/#synchronicity-of-alter-queries). - */ - replicationAlterPartitionsSync?: number; /** * Determine the behavior of distributed subqueries. * @@ -168,32 +204,6 @@ export interface UserSettings { * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-skip_unavailable_shards). */ skipUnavailableShards?: boolean; - /** - * Enables or disables query compilation. - * If you execute a lot of structurally identical queries, then enable this setting. - * As a result, such queries may be executed faster due to use of queries' compiled parts. - * - * Use this setting in combination with [min_count_to_compile] setting. - * - * Default value: **false** (compilation is disabled). - * - * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#compile). - */ - compile?: boolean; - /** - * How many structurally identical queries ClickHouse has to encounter before they are compiled. - * - * Minimum value: **0** (default: **3**). - * - * For the **0** value compilation is synchronous: a query waits for compilation process to complete prior to continuing execution. - * It is recommended to set this value only for testing purposes. 
- * - * For all other values, compilation is asynchronous: the compilation process executes in a separate thread. - * When a compiled part of query is ready, it will be used by ClickHouse for eligible queries, including the ones that are currently running. - * - * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#min-count-to-compile). - */ - minCountToCompile?: number; /** * Enables or disables expression compilation. * If you execute a lot of queries that contain identical expressions, then enable this setting. @@ -401,6 +411,13 @@ export interface UserSettings { * Minimal value and default value: **0**, no limitation is set. */ maxNetworkBandwidthForUser?: number; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/ru/operations/settings/query-complexity/#max-partitions-per-insert-block). */ + maxPartitionsPerInsertBlock?: number; + /** + * The maximum number of concurrent requests per user. + * Default value: 0 (no limit). + */ + maxConcurrentQueriesForUser?: number; /** * If enabled, query is not executed if the ClickHouse can't use index by date. * This setting has effect only for tables of the MergeTree family. @@ -564,6 +581,10 @@ export interface UserSettings { * Possible values: OVERFLOW_MODE_THROW, OVERFLOW_MODE_BREAK. */ joinOverflowMode: UserSettings_OverflowMode; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-join_algorithm). */ + joinAlgorithm: UserSettings_JoinAlgorithm[]; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#any_join_distinct_right_table_keys). */ + anyJoinDistinctRightTableKeys?: boolean; /** * Limits the maximum number of columns that can be read from a table in a single query. * If the query requires to read more columns to complete, then it will be aborted. 
@@ -655,6 +676,12 @@ export interface UserSettings { * Default value: **true** (replacing is enabled). */ inputFormatDefaultsForOmittedFields?: boolean; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#input_format_null_as_default). */ + inputFormatNullAsDefault?: boolean; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#date_time_input_format). */ + dateTimeInputFormat: UserSettings_DateTimeInputFormat; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#input_format_with_names_use_header). */ + inputFormatWithNamesUseHeader?: boolean; /** * Enables quoting of 64-bit integers in JSON output format. * @@ -670,6 +697,8 @@ export interface UserSettings { * Default value: **false** (special values do not present in output). */ outputFormatJsonQuoteDenormals?: boolean; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#date_time_output_format). */ + dateTimeOutputFormat: UserSettings_DateTimeOutputFormat; /** * Determines whether to use LowCardinality type in Native format. * @@ -687,6 +716,12 @@ export interface UserSettings { * Default value: **true** (LowCardinality columns are used in Native format). */ lowCardinalityAllowInNativeFormat?: boolean; + /** + * Allows specifying **LowCardinality** modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#allow_suspicious_low_cardinality_types). 
+ */ + allowSuspiciousLowCardinalityTypes?: boolean; /** * Enables returning of empty result when aggregating without keys (with **GROUP BY** operation absent) on empty set (e.g., **SELECT count(*) FROM table WHERE 0**). * @@ -694,9 +729,6 @@ export interface UserSettings { * * **false** (default)-ClickHouse will return a single-line result consisting of **NULL** values for aggregation functions, in accordance with SQL standard. */ emptyResultForAggregationByEmptySet?: boolean; - joinedSubqueryRequiresAlias?: boolean; - joinUseNulls?: boolean; - transformNullIn?: boolean; /** * HTTP connection timeout, in milliseconds. * @@ -748,8 +780,195 @@ export interface UserSettings { * Default value: **false** (header is not added). */ addHttpCorsHeader?: boolean; + /** + * Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. + * + * Default value: **false**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#cancel-http-readonly-queries-on-client-close). + */ + cancelHttpReadonlyQueriesOnClientClose?: boolean; + /** + * Limits the maximum number of HTTP GET redirect hops for [URL-engine](https://clickhouse.com/docs/en/engines/table-engines/special/url) tables. + * + * If the parameter is set to **0** (default), no hops is allowed. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-max_http_get_redirects). + */ + maxHttpGetRedirects?: number; + joinedSubqueryRequiresAlias?: boolean; + joinUseNulls?: boolean; + transformNullIn?: boolean; /** Quota accounting mode. Possible values: QUOTA_MODE_DEFAULT, QUOTA_MODE_KEYED and QUOTA_MODE_KEYED_BY_IP. */ quotaMode: UserSettings_QuotaMode; + /** + * Sets the data format of a [nested](https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested) columns. 
+ * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#flatten-nested). + */ + flattenNested?: boolean; + /** Regular expression (for Regexp format) */ + formatRegexp: string; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#format_regexp_escaping_rule). */ + formatRegexpEscapingRule: UserSettings_FormatRegexpEscapingRule; + /** See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#format_regexp_skip_unmatched). */ + formatRegexpSkipUnmatched?: boolean; + /** + * Enables asynchronous inserts. + * + * Disabled by default. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert). + */ + asyncInsert?: boolean; + /** + * The maximum number of threads for background data parsing and insertion. + * + * If the parameter is set to **0**, asynchronous insertions are disabled. Default value: **16**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-threads). + */ + asyncInsertThreads?: number; + /** + * Enables waiting for processing of asynchronous insertion. If enabled, server returns OK only after the data is inserted. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#wait-for-async-insert). + */ + waitForAsyncInsert?: boolean; + /** + * The timeout (in seconds) for waiting for processing of asynchronous insertion. + * + * Default value: **120**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#wait-for-async-insert-timeout). + */ + waitForAsyncInsertTimeout?: number; + /** + * The maximum size of the unparsed data in bytes collected per query before being inserted. 
+ * + * If the parameter is set to **0**, asynchronous insertions are disabled. Default value: **100000**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-max-data-size). + */ + asyncInsertMaxDataSize?: number; + /** + * The maximum timeout in milliseconds since the first INSERT query before inserting collected data. + * + * If the parameter is set to **0**, the timeout is disabled. Default value: **200**. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-busy-timeout-ms). + */ + asyncInsertBusyTimeout?: number; + /** + * The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the settings prolongs the [async_insert_busy_timeout] with every INSERT query as long as [async_insert_max_data_size] is not exceeded. + * + * More info see in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-stale-timeout-ms). + */ + asyncInsertStaleTimeout?: number; + /** + * Memory profiler step (in bytes). + * + * If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. + * + * Default value: **4194304** (4 MB). Zero means disabled memory profiler. + */ + memoryProfilerStep?: number; + /** + * Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. + * + * Possible values: from **0** to **1**. Default: **0**. + */ + memoryProfilerSampleProbability?: number; + /** + * Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier. 
+ * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#max-final-threads). + */ + maxFinalThreads?: number; + /** + * Enables or disables order-preserving parallel parsing of data formats. Supported only for [TSV](https://clickhouse.com/docs/en/interfaces/formats#tabseparated), [TKSV](https://clickhouse.com/docs/en/interfaces/formats#tskv), [CSV](https://clickhouse.com/docs/en/interfaces/formats#csv) and [JSONEachRow](https://clickhouse.com/docs/en/interfaces/formats#jsoneachrow) formats. + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#input-format-parallel-parsing) + */ + inputFormatParallelParsing?: boolean; + /** + * Enables or disables the insertion of JSON data with nested objects. + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#input-format-parallel-parsing) + */ + inputFormatImportNestedJson?: boolean; + /** Method of reading data from local filesystem, one of: read, pread, mmap, io_uring, pread_threadpool. The 'io_uring' method is experimental and does not work for Log, TinyLog, StripeLog, File, Set and Join, and other tables with append-able files in presence of concurrent reads and writes. */ + localFilesystemReadMethod: UserSettings_LocalFilesystemReadMethod; + /** + * The maximum size of the buffer to read from the filesystem. + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_read_buffer_size) + */ + maxReadBufferSize?: number; + /** + * The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries. 
+ * Default: 20 from 23.2, 0(disabled) before + * Min_version: 22.11 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#insert_keeper_max_retries) + */ + insertKeeperMaxRetries?: number; + /** + * The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited. + * Default: 0 - unlimited + * Min_version: 22.10 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity#settings_max_temporary_data_on_disk_size_for_user) + */ + maxTemporaryDataOnDiskSizeForUser?: number; + /** + * The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited. + * Default: 0 - unlimited + * Min_version: 22.10 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity#settings_max_temporary_data_on_disk_size_for_query) + */ + maxTemporaryDataOnDiskSizeForQuery?: number; + /** + * Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. + * Default: 1000 + * Special: 0 - unlimited + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#max_parser_depth) + */ + maxParserDepth?: number; + /** + * Method of reading data from remote filesystem, one of: read, threadpool. + * Default: read + * Min_version: 21.11 + * See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/f9558345e886876b9132d9c018e357f7fa9b22a3/src/Core/Settings.h#L660) + */ + remoteFilesystemReadMethod: UserSettings_RemoteFilesystemReadMethod; + /** + * It represents soft memory limit in case when hard limit is reached on user level. This value is used to compute overcommit ratio for the query. Zero means skip the query. 
+ * Default: 1GiB + * Min_version: 22.5 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_overcommit_ratio_denominator) + */ + memoryOvercommitRatioDenominator?: number; + /** + * It represents soft memory limit in case when hard limit is reached on global level. This value is used to compute overcommit ratio for the query. Zero means skip the query. + * Default: 1GiB + * Min_version: 22.5 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_overcommit_ratio_denominator_for_user) + */ + memoryOvercommitRatioDenominatorForUser?: number; + /** + * Maximum time thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown. + * Default: 5000000 + * Min_version: 22.5 + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_usage_overcommit_max_wait_microseconds) + */ + memoryUsageOvercommitMaxWaitMicroseconds?: number; + /** + * The setting is deprecated and has no effect. + * + * @deprecated + */ + compile?: boolean; + /** + * The setting is deprecated and has no effect. + * + * @deprecated + */ + minCountToCompile?: number; } export enum UserSettings_OverflowMode { @@ -1008,87 +1227,417 @@ export function userSettings_CountDistinctImplementationToJSON( } } -/** - * ClickHouse quota representation. Each quota associated with an user and limits it resource usage for an interval. - * See in-depth description [ClickHouse documentation](https://clickhouse.com/docs/en/operations/quotas/). - */ -export interface UserQuota { - $type: "yandex.cloud.mdb.clickhouse.v1.UserQuota"; - /** - * Duration of interval for quota in milliseconds. - * Minimal value is 1 second. - */ - intervalDuration?: number; - /** - * The total number of queries. - * 0 - unlimited. 
- */ - queries?: number; - /** - * The number of queries that threw exception. - * 0 - unlimited. - */ - errors?: number; - /** - * The total number of rows given as the result.. - * 0 - unlimited. - */ - resultRows?: number; - /** - * The total number of source rows read from tables for running the query, on all remote servers. - * 0 - unlimited. - */ - readRows?: number; - /** - * The total query execution time, in milliseconds (wall time). - * 0 - unlimited. - */ - executionTime?: number; +export enum UserSettings_JoinAlgorithm { + JOIN_ALGORITHM_UNSPECIFIED = 0, + JOIN_ALGORITHM_HASH = 1, + JOIN_ALGORITHM_PARALLEL_HASH = 2, + JOIN_ALGORITHM_PARTIAL_MERGE = 3, + JOIN_ALGORITHM_DIRECT = 4, + JOIN_ALGORITHM_AUTO = 5, + JOIN_ALGORITHM_FULL_SORTING_MERGE = 6, + JOIN_ALGORITHM_PREFER_PARTIAL_MERGE = 7, + UNRECOGNIZED = -1, } -const baseUser: object = { - $type: "yandex.cloud.mdb.clickhouse.v1.User", - name: "", - clusterId: "", -}; +export function userSettings_JoinAlgorithmFromJSON( + object: any +): UserSettings_JoinAlgorithm { + switch (object) { + case 0: + case "JOIN_ALGORITHM_UNSPECIFIED": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_UNSPECIFIED; + case 1: + case "JOIN_ALGORITHM_HASH": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_HASH; + case 2: + case "JOIN_ALGORITHM_PARALLEL_HASH": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PARALLEL_HASH; + case 3: + case "JOIN_ALGORITHM_PARTIAL_MERGE": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PARTIAL_MERGE; + case 4: + case "JOIN_ALGORITHM_DIRECT": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_DIRECT; + case 5: + case "JOIN_ALGORITHM_AUTO": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_AUTO; + case 6: + case "JOIN_ALGORITHM_FULL_SORTING_MERGE": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_FULL_SORTING_MERGE; + case 7: + case "JOIN_ALGORITHM_PREFER_PARTIAL_MERGE": + return UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PREFER_PARTIAL_MERGE; + case -1: + case "UNRECOGNIZED": + default: 
+ return UserSettings_JoinAlgorithm.UNRECOGNIZED; + } +} -export const User = { - $type: "yandex.cloud.mdb.clickhouse.v1.User" as const, +export function userSettings_JoinAlgorithmToJSON( + object: UserSettings_JoinAlgorithm +): string { + switch (object) { + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_UNSPECIFIED: + return "JOIN_ALGORITHM_UNSPECIFIED"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_HASH: + return "JOIN_ALGORITHM_HASH"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PARALLEL_HASH: + return "JOIN_ALGORITHM_PARALLEL_HASH"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PARTIAL_MERGE: + return "JOIN_ALGORITHM_PARTIAL_MERGE"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_DIRECT: + return "JOIN_ALGORITHM_DIRECT"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_AUTO: + return "JOIN_ALGORITHM_AUTO"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_FULL_SORTING_MERGE: + return "JOIN_ALGORITHM_FULL_SORTING_MERGE"; + case UserSettings_JoinAlgorithm.JOIN_ALGORITHM_PREFER_PARTIAL_MERGE: + return "JOIN_ALGORITHM_PREFER_PARTIAL_MERGE"; + default: + return "UNKNOWN"; + } +} - encode(message: User, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.clusterId !== "") { - writer.uint32(18).string(message.clusterId); - } - for (const v of message.permissions) { - Permission.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.settings !== undefined) { - UserSettings.encode(message.settings, writer.uint32(34).fork()).ldelim(); - } - for (const v of message.quotas) { - UserQuota.encode(v!, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, +export enum UserSettings_FormatRegexpEscapingRule { + FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED = 0, + FORMAT_REGEXP_ESCAPING_RULE_ESCAPED = 1, + FORMAT_REGEXP_ESCAPING_RULE_QUOTED = 2, + FORMAT_REGEXP_ESCAPING_RULE_CSV = 3, + FORMAT_REGEXP_ESCAPING_RULE_JSON = 4, + FORMAT_REGEXP_ESCAPING_RULE_XML = 5, + 
FORMAT_REGEXP_ESCAPING_RULE_RAW = 6, + UNRECOGNIZED = -1, +} - decode(input: _m0.Reader | Uint8Array, length?: number): User { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseUser } as User; - message.permissions = []; - message.quotas = []; - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.name = reader.string(); - break; - case 2: - message.clusterId = reader.string(); - break; +export function userSettings_FormatRegexpEscapingRuleFromJSON( + object: any +): UserSettings_FormatRegexpEscapingRule { + switch (object) { + case 0: + case "FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED; + case 1: + case "FORMAT_REGEXP_ESCAPING_RULE_ESCAPED": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_ESCAPED; + case 2: + case "FORMAT_REGEXP_ESCAPING_RULE_QUOTED": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_QUOTED; + case 3: + case "FORMAT_REGEXP_ESCAPING_RULE_CSV": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_CSV; + case 4: + case "FORMAT_REGEXP_ESCAPING_RULE_JSON": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_JSON; + case 5: + case "FORMAT_REGEXP_ESCAPING_RULE_XML": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_XML; + case 6: + case "FORMAT_REGEXP_ESCAPING_RULE_RAW": + return UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_RAW; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_FormatRegexpEscapingRule.UNRECOGNIZED; + } +} + +export function userSettings_FormatRegexpEscapingRuleToJSON( + object: UserSettings_FormatRegexpEscapingRule +): string { + switch (object) { + case 
UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED: + return "FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_ESCAPED: + return "FORMAT_REGEXP_ESCAPING_RULE_ESCAPED"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_QUOTED: + return "FORMAT_REGEXP_ESCAPING_RULE_QUOTED"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_CSV: + return "FORMAT_REGEXP_ESCAPING_RULE_CSV"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_JSON: + return "FORMAT_REGEXP_ESCAPING_RULE_JSON"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_XML: + return "FORMAT_REGEXP_ESCAPING_RULE_XML"; + case UserSettings_FormatRegexpEscapingRule.FORMAT_REGEXP_ESCAPING_RULE_RAW: + return "FORMAT_REGEXP_ESCAPING_RULE_RAW"; + default: + return "UNKNOWN"; + } +} + +export enum UserSettings_DateTimeInputFormat { + DATE_TIME_INPUT_FORMAT_UNSPECIFIED = 0, + DATE_TIME_INPUT_FORMAT_BEST_EFFORT = 1, + DATE_TIME_INPUT_FORMAT_BASIC = 2, + DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US = 3, + UNRECOGNIZED = -1, +} + +export function userSettings_DateTimeInputFormatFromJSON( + object: any +): UserSettings_DateTimeInputFormat { + switch (object) { + case 0: + case "DATE_TIME_INPUT_FORMAT_UNSPECIFIED": + return UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_UNSPECIFIED; + case 1: + case "DATE_TIME_INPUT_FORMAT_BEST_EFFORT": + return UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BEST_EFFORT; + case 2: + case "DATE_TIME_INPUT_FORMAT_BASIC": + return UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BASIC; + case 3: + case "DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US": + return UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_DateTimeInputFormat.UNRECOGNIZED; + } +} + +export function userSettings_DateTimeInputFormatToJSON( + object: 
UserSettings_DateTimeInputFormat +): string { + switch (object) { + case UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_UNSPECIFIED: + return "DATE_TIME_INPUT_FORMAT_UNSPECIFIED"; + case UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BEST_EFFORT: + return "DATE_TIME_INPUT_FORMAT_BEST_EFFORT"; + case UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BASIC: + return "DATE_TIME_INPUT_FORMAT_BASIC"; + case UserSettings_DateTimeInputFormat.DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US: + return "DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US"; + default: + return "UNKNOWN"; + } +} + +export enum UserSettings_DateTimeOutputFormat { + DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED = 0, + DATE_TIME_OUTPUT_FORMAT_SIMPLE = 1, + DATE_TIME_OUTPUT_FORMAT_ISO = 2, + DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP = 3, + UNRECOGNIZED = -1, +} + +export function userSettings_DateTimeOutputFormatFromJSON( + object: any +): UserSettings_DateTimeOutputFormat { + switch (object) { + case 0: + case "DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED": + return UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED; + case 1: + case "DATE_TIME_OUTPUT_FORMAT_SIMPLE": + return UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_SIMPLE; + case 2: + case "DATE_TIME_OUTPUT_FORMAT_ISO": + return UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_ISO; + case 3: + case "DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP": + return UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_DateTimeOutputFormat.UNRECOGNIZED; + } +} + +export function userSettings_DateTimeOutputFormatToJSON( + object: UserSettings_DateTimeOutputFormat +): string { + switch (object) { + case UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED: + return "DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED"; + case UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_SIMPLE: + return "DATE_TIME_OUTPUT_FORMAT_SIMPLE"; + case 
UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_ISO: + return "DATE_TIME_OUTPUT_FORMAT_ISO"; + case UserSettings_DateTimeOutputFormat.DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP: + return "DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP"; + default: + return "UNKNOWN"; + } +} + +export enum UserSettings_LocalFilesystemReadMethod { + LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED = 0, + LOCAL_FILESYSTEM_READ_METHOD_READ = 1, + LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL = 2, + LOCAL_FILESYSTEM_READ_METHOD_PREAD = 3, + LOCAL_FILESYSTEM_READ_METHOD_NMAP = 4, + UNRECOGNIZED = -1, +} + +export function userSettings_LocalFilesystemReadMethodFromJSON( + object: any +): UserSettings_LocalFilesystemReadMethod { + switch (object) { + case 0: + case "LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED": + return UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED; + case 1: + case "LOCAL_FILESYSTEM_READ_METHOD_READ": + return UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_READ; + case 2: + case "LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL": + return UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL; + case 3: + case "LOCAL_FILESYSTEM_READ_METHOD_PREAD": + return UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_PREAD; + case 4: + case "LOCAL_FILESYSTEM_READ_METHOD_NMAP": + return UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_NMAP; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_LocalFilesystemReadMethod.UNRECOGNIZED; + } +} + +export function userSettings_LocalFilesystemReadMethodToJSON( + object: UserSettings_LocalFilesystemReadMethod +): string { + switch (object) { + case UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED: + return "LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED"; + case UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_READ: + return "LOCAL_FILESYSTEM_READ_METHOD_READ"; + case 
UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL: + return "LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL"; + case UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_PREAD: + return "LOCAL_FILESYSTEM_READ_METHOD_PREAD"; + case UserSettings_LocalFilesystemReadMethod.LOCAL_FILESYSTEM_READ_METHOD_NMAP: + return "LOCAL_FILESYSTEM_READ_METHOD_NMAP"; + default: + return "UNKNOWN"; + } +} + +export enum UserSettings_RemoteFilesystemReadMethod { + REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED = 0, + REMOTE_FILESYSTEM_READ_METHOD_READ = 1, + REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL = 2, + UNRECOGNIZED = -1, +} + +export function userSettings_RemoteFilesystemReadMethodFromJSON( + object: any +): UserSettings_RemoteFilesystemReadMethod { + switch (object) { + case 0: + case "REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED": + return UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED; + case 1: + case "REMOTE_FILESYSTEM_READ_METHOD_READ": + return UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_READ; + case 2: + case "REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL": + return UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_RemoteFilesystemReadMethod.UNRECOGNIZED; + } +} + +export function userSettings_RemoteFilesystemReadMethodToJSON( + object: UserSettings_RemoteFilesystemReadMethod +): string { + switch (object) { + case UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED: + return "REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED"; + case UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_READ: + return "REMOTE_FILESYSTEM_READ_METHOD_READ"; + case UserSettings_RemoteFilesystemReadMethod.REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL: + return "REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL"; + default: + return "UNKNOWN"; + } +} + +/** + * ClickHouse quota 
representation. Each quota associated with an user and limits it resource usage for an interval. + * See in-depth description [ClickHouse documentation](https://clickhouse.com/docs/en/operations/quotas/). + */ +export interface UserQuota { + $type: "yandex.cloud.mdb.clickhouse.v1.UserQuota"; + /** + * Duration of interval for quota in milliseconds. + * Minimal value is 1 second. + */ + intervalDuration?: number; + /** + * The total number of queries. + * 0 - unlimited. + */ + queries?: number; + /** + * The number of queries that threw exception. + * 0 - unlimited. + */ + errors?: number; + /** + * The total number of rows given as the result.. + * 0 - unlimited. + */ + resultRows?: number; + /** + * The total number of source rows read from tables for running the query, on all remote servers. + * 0 - unlimited. + */ + readRows?: number; + /** + * The total query execution time, in milliseconds (wall time). + * 0 - unlimited. + */ + executionTime?: number; +} + +const baseUser: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.User", + name: "", + clusterId: "", +}; + +export const User = { + $type: "yandex.cloud.mdb.clickhouse.v1.User" as const, + + encode(message: User, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + for (const v of message.permissions) { + Permission.encode(v!, writer.uint32(26).fork()).ldelim(); + } + if (message.settings !== undefined) { + UserSettings.encode(message.settings, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.quotas) { + UserQuota.encode(v!, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): User { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUser } as User; + message.permissions = []; + message.quotas = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; case 3: message.permissions.push(Permission.decode(reader, reader.uint32())); break; @@ -1374,8 +1923,15 @@ const baseUserSettings: object = { timeoutOverflowMode: 0, setOverflowMode: 0, joinOverflowMode: 0, + joinAlgorithm: 0, countDistinctImplementation: 0, + dateTimeInputFormat: 0, + dateTimeOutputFormat: 0, quotaMode: 0, + formatRegexp: "", + formatRegexpEscapingRule: 0, + localFilesystemReadMethod: 0, + remoteFilesystemReadMethod: 0, }; export const UserSettings = { @@ -1397,10 +1953,13 @@ export const UserSettings = { writer.uint32(18).fork() ).ldelim(); } - if (message.insertQuorum !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.insertQuorum! }, - writer.uint32(26).fork() + if (message.allowIntrospectionFunctions !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.allowIntrospectionFunctions!, + }, + writer.uint32(770).fork() ).ldelim(); } if (message.connectTimeout !== undefined) { @@ -1409,6 +1968,15 @@ export const UserSettings = { writer.uint32(314).fork() ).ldelim(); } + if (message.connectTimeoutWithFailover !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.connectTimeoutWithFailover!, + }, + writer.uint32(778).fork() + ).ldelim(); + } if (message.receiveTimeout !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", value: message.receiveTimeout! 
}, @@ -1421,6 +1989,21 @@ export const UserSettings = { writer.uint32(330).fork() ).ldelim(); } + if (message.timeoutBeforeCheckingExecutionSpeed !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.timeoutBeforeCheckingExecutionSpeed!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.insertQuorum !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.insertQuorum! }, + writer.uint32(26).fork() + ).ldelim(); + } if (message.insertQuorumTimeout !== undefined) { Int64Value.encode( { @@ -1430,6 +2013,24 @@ export const UserSettings = { writer.uint32(34).fork() ).ldelim(); } + if (message.insertQuorumParallel !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.insertQuorumParallel!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.insertNullAsDefault !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.insertNullAsDefault!, + }, + writer.uint32(802).fork() + ).ldelim(); + } if (message.selectSequentialConsistency !== undefined) { BoolValue.encode( { @@ -1439,6 +2040,24 @@ export const UserSettings = { writer.uint32(42).fork() ).ldelim(); } + if (message.deduplicateBlocksInDependentMaterializedViews !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deduplicateBlocksInDependentMaterializedViews!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.replicationAlterPartitionsSync !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicationAlterPartitionsSync!, + }, + writer.uint32(338).fork() + ).ldelim(); + } if (message.maxReplicaDelayForDistributedQueries !== undefined) { Int64Value.encode( { @@ -1457,15 +2076,6 @@ export const UserSettings = { writer.uint32(58).fork() ).ldelim(); } - if (message.replicationAlterPartitionsSync !== undefined) { - Int64Value.encode( 
- { - $type: "google.protobuf.Int64Value", - value: message.replicationAlterPartitionsSync!, - }, - writer.uint32(338).fork() - ).ldelim(); - } if (message.distributedProductMode !== 0) { writer.uint32(344).int32(message.distributedProductMode); } @@ -1496,22 +2106,7 @@ export const UserSettings = { writer.uint32(650).fork() ).ldelim(); } - if (message.compile !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.compile! }, - writer.uint32(354).fork() - ).ldelim(); - } - if (message.minCountToCompile !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.minCountToCompile!, - }, - writer.uint32(362).fork() - ).ldelim(); - } - if (message.compileExpressions !== undefined) { + if (message.compileExpressions !== undefined) { BoolValue.encode( { $type: "google.protobuf.BoolValue", @@ -1697,6 +2292,24 @@ export const UserSettings = { writer.uint32(466).fork() ).ldelim(); } + if (message.maxPartitionsPerInsertBlock !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPartitionsPerInsertBlock!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.maxConcurrentQueriesForUser !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxConcurrentQueriesForUser!, + }, + writer.uint32(826).fork() + ).ldelim(); + } if (message.forceIndexByDate !== undefined) { BoolValue.encode( { @@ -1853,6 +2466,20 @@ export const UserSettings = { if (message.joinOverflowMode !== 0) { writer.uint32(736).int32(message.joinOverflowMode); } + writer.uint32(834).fork(); + for (const v of message.joinAlgorithm) { + writer.int32(v); + } + writer.ldelim(); + if (message.anyJoinDistinctRightTableKeys !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.anyJoinDistinctRightTableKeys!, + }, + writer.uint32(842).fork() + ).ldelim(); + } if (message.maxColumnsToRead !== undefined) { 
Int64Value.encode( { @@ -1946,6 +2573,27 @@ export const UserSettings = { writer.uint32(498).fork() ).ldelim(); } + if (message.inputFormatNullAsDefault !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.inputFormatNullAsDefault!, + }, + writer.uint32(850).fork() + ).ldelim(); + } + if (message.dateTimeInputFormat !== 0) { + writer.uint32(856).int32(message.dateTimeInputFormat); + } + if (message.inputFormatWithNamesUseHeader !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.inputFormatWithNamesUseHeader!, + }, + writer.uint32(866).fork() + ).ldelim(); + } if (message.outputFormatJsonQuote64bitIntegers !== undefined) { BoolValue.encode( { @@ -1964,6 +2612,9 @@ export const UserSettings = { writer.uint32(514).fork() ).ldelim(); } + if (message.dateTimeOutputFormat !== 0) { + writer.uint32(872).int32(message.dateTimeOutputFormat); + } if (message.lowCardinalityAllowInNativeFormat !== undefined) { BoolValue.encode( { @@ -1973,34 +2624,22 @@ export const UserSettings = { writer.uint32(626).fork() ).ldelim(); } - if (message.emptyResultForAggregationByEmptySet !== undefined) { + if (message.allowSuspiciousLowCardinalityTypes !== undefined) { BoolValue.encode( { $type: "google.protobuf.BoolValue", - value: message.emptyResultForAggregationByEmptySet!, + value: message.allowSuspiciousLowCardinalityTypes!, }, - writer.uint32(634).fork() + writer.uint32(882).fork() ).ldelim(); } - if (message.joinedSubqueryRequiresAlias !== undefined) { + if (message.emptyResultForAggregationByEmptySet !== undefined) { BoolValue.encode( { $type: "google.protobuf.BoolValue", - value: message.joinedSubqueryRequiresAlias!, + value: message.emptyResultForAggregationByEmptySet!, }, - writer.uint32(746).fork() - ).ldelim(); - } - if (message.joinUseNulls !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.joinUseNulls! 
}, - writer.uint32(754).fork() - ).ldelim(); - } - if (message.transformNullIn !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.transformNullIn! }, - writer.uint32(762).fork() + writer.uint32(634).fork() ).ldelim(); } if (message.httpConnectionTimeout !== undefined) { @@ -2066,9 +2705,264 @@ export const UserSettings = { writer.uint32(570).fork() ).ldelim(); } + if (message.cancelHttpReadonlyQueriesOnClientClose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.cancelHttpReadonlyQueriesOnClientClose!, + }, + writer.uint32(890).fork() + ).ldelim(); + } + if (message.maxHttpGetRedirects !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxHttpGetRedirects!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.joinedSubqueryRequiresAlias !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.joinedSubqueryRequiresAlias!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.joinUseNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.joinUseNulls! }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.transformNullIn !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.transformNullIn! }, + writer.uint32(762).fork() + ).ldelim(); + } if (message.quotaMode !== 0) { writer.uint32(640).int32(message.quotaMode); } + if (message.flattenNested !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.flattenNested! 
}, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.formatRegexp !== "") { + writer.uint32(914).string(message.formatRegexp); + } + if (message.formatRegexpEscapingRule !== 0) { + writer.uint32(920).int32(message.formatRegexpEscapingRule); + } + if (message.formatRegexpSkipUnmatched !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.formatRegexpSkipUnmatched!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.asyncInsert !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.asyncInsert! }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.asyncInsertThreads !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asyncInsertThreads!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.waitForAsyncInsert !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.waitForAsyncInsert!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.waitForAsyncInsertTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.waitForAsyncInsertTimeout!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.asyncInsertMaxDataSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asyncInsertMaxDataSize!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.asyncInsertBusyTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asyncInsertBusyTimeout!, + }, + writer.uint32(978).fork() + ).ldelim(); + } + if (message.asyncInsertStaleTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.asyncInsertStaleTimeout!, + }, + writer.uint32(986).fork() + ).ldelim(); + } + if (message.memoryProfilerStep !== undefined) { + Int64Value.encode( + { + $type: 
"google.protobuf.Int64Value", + value: message.memoryProfilerStep!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.memoryProfilerSampleProbability !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.memoryProfilerSampleProbability!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.maxFinalThreads !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxFinalThreads!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.inputFormatParallelParsing !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.inputFormatParallelParsing!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.inputFormatImportNestedJson !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.inputFormatImportNestedJson!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.localFilesystemReadMethod !== 0) { + writer.uint32(1032).int32(message.localFilesystemReadMethod); + } + if (message.maxReadBufferSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxReadBufferSize!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.insertKeeperMaxRetries !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.insertKeeperMaxRetries!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.maxTemporaryDataOnDiskSizeForUser !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxTemporaryDataOnDiskSizeForUser!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.maxTemporaryDataOnDiskSizeForQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxTemporaryDataOnDiskSizeForQuery!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if 
(message.maxParserDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxParserDepth! }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.remoteFilesystemReadMethod !== 0) { + writer.uint32(1080).int32(message.remoteFilesystemReadMethod); + } + if (message.memoryOvercommitRatioDenominator !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.memoryOvercommitRatioDenominator!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.memoryOvercommitRatioDenominatorForUser !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.memoryOvercommitRatioDenominatorForUser!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.memoryUsageOvercommitMaxWaitMicroseconds !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.memoryUsageOvercommitMaxWaitMicroseconds!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.compile !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.compile! }, + writer.uint32(354).fork() + ).ldelim(); + } + if (message.minCountToCompile !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minCountToCompile!, + }, + writer.uint32(362).fork() + ).ldelim(); + } return writer; }, @@ -2076,6 +2970,7 @@ export const UserSettings = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseUserSettings } as UserSettings; + message.joinAlgorithm = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2085,8 +2980,8 @@ export const UserSettings = { case 2: message.allowDdl = BoolValue.decode(reader, reader.uint32()).value; break; - case 3: - message.insertQuorum = Int64Value.decode( + case 96: + message.allowIntrospectionFunctions = BoolValue.decode( reader, reader.uint32() ).value; @@ -2097,6 +2992,12 @@ export const UserSettings = { reader.uint32() ).value; break; + case 97: + message.connectTimeoutWithFailover = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; case 40: message.receiveTimeout = Int64Value.decode( reader, @@ -2109,26 +3010,44 @@ export const UserSettings = { reader.uint32() ).value; break; + case 98: + message.timeoutBeforeCheckingExecutionSpeed = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.insertQuorum = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; case 4: message.insertQuorumTimeout = Int64Value.decode( reader, reader.uint32() ).value; break; - case 5: - message.selectSequentialConsistency = BoolValue.decode( + case 99: + message.insertQuorumParallel = BoolValue.decode( reader, reader.uint32() ).value; break; - case 6: - message.maxReplicaDelayForDistributedQueries = Int64Value.decode( + case 100: + message.insertNullAsDefault = BoolValue.decode( reader, reader.uint32() ).value; break; - case 7: - message.fallbackToStaleReplicasForDistributedQueries = + case 5: + message.selectSequentialConsistency = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.deduplicateBlocksInDependentMaterializedViews = BoolValue.decode(reader, reader.uint32()).value; break; case 42: @@ -2137,6 +3056,16 @@ export const UserSettings = { reader.uint32() ).value; break; + case 6: + message.maxReplicaDelayForDistributedQueries = Int64Value.decode( + 
reader, + reader.uint32() + ).value; + break; + case 7: + message.fallbackToStaleReplicasForDistributedQueries = + BoolValue.decode(reader, reader.uint32()).value; + break; case 43: message.distributedProductMode = reader.int32() as any; break; @@ -2158,15 +3087,6 @@ export const UserSettings = { reader.uint32() ).value; break; - case 44: - message.compile = BoolValue.decode(reader, reader.uint32()).value; - break; - case 45: - message.minCountToCompile = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; case 46: message.compileExpressions = BoolValue.decode( reader, @@ -2293,6 +3213,18 @@ export const UserSettings = { reader.uint32() ).value; break; + case 102: + message.maxPartitionsPerInsertBlock = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.maxConcurrentQueriesForUser = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; case 59: message.forceIndexByDate = BoolValue.decode( reader, @@ -2395,195 +3327,397 @@ export const UserSettings = { reader.uint32() ).value; break; - case 31: - message.timeoutOverflowMode = reader.int32() as any; - break; - case 87: - message.maxRowsInSet = Int64Value.decode( + case 31: + message.timeoutOverflowMode = reader.int32() as any; + break; + case 87: + message.maxRowsInSet = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.maxBytesInSet = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.setOverflowMode = reader.int32() as any; + break; + case 90: + message.maxRowsInJoin = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxBytesInJoin = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.joinOverflowMode = reader.int32() as any; + break; + case 104: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.joinAlgorithm.push(reader.int32() as any); + } + 
} else { + message.joinAlgorithm.push(reader.int32() as any); + } + break; + case 105: + message.anyJoinDistinctRightTableKeys = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.maxColumnsToRead = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.maxTemporaryColumns = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 34: + message.maxTemporaryNonConstColumns = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 35: + message.maxQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.maxAstDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxAstElements = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxExpandedAstElements = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.minExecutionSpeed = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.minExecutionSpeedBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.countDistinctImplementation = reader.int32() as any; + break; + case 61: + message.inputFormatValuesInterpretExpressions = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.inputFormatDefaultsForOmittedFields = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 106: + message.inputFormatNullAsDefault = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.dateTimeInputFormat = reader.int32() as any; + break; + case 108: + message.inputFormatWithNamesUseHeader = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.outputFormatJsonQuote64bitIntegers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.outputFormatJsonQuoteDenormals = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 109: + message.dateTimeOutputFormat = reader.int32() as any; + break; + case 78: + message.lowCardinalityAllowInNativeFormat = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.allowSuspiciousLowCardinalityTypes = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.emptyResultForAggregationByEmptySet = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.httpConnectionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.httpReceiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.httpSendTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.enableHttpCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.sendProgressInHttpHeaders = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 70: + message.httpHeadersProgressInterval = Int64Value.decode( reader, reader.uint32() ).value; break; - case 88: - message.maxBytesInSet = Int64Value.decode( + case 71: + message.addHttpCorsHeader = BoolValue.decode( reader, reader.uint32() ).value; break; - case 89: - message.setOverflowMode = reader.int32() as any; - break; - case 90: - message.maxRowsInJoin = Int64Value.decode( + case 111: + message.cancelHttpReadonlyQueriesOnClientClose = BoolValue.decode( reader, reader.uint32() ).value; break; - case 91: - message.maxBytesInJoin = Int64Value.decode( + case 112: + message.maxHttpGetRedirects = Int64Value.decode( reader, reader.uint32() ).value; break; - case 92: - message.joinOverflowMode = reader.int32() as any; - break; - case 32: - message.maxColumnsToRead = Int64Value.decode( + case 93: + message.joinedSubqueryRequiresAlias = BoolValue.decode( reader, reader.uint32() ).value; break; - case 33: - 
message.maxTemporaryColumns = Int64Value.decode( + case 94: + message.joinUseNulls = BoolValue.decode( reader, reader.uint32() ).value; break; - case 34: - message.maxTemporaryNonConstColumns = Int64Value.decode( + case 95: + message.transformNullIn = BoolValue.decode( reader, reader.uint32() ).value; break; - case 35: - message.maxQuerySize = Int64Value.decode( + case 80: + message.quotaMode = reader.int32() as any; + break; + case 113: + message.flattenNested = BoolValue.decode( reader, reader.uint32() ).value; break; - case 36: - message.maxAstDepth = Int64Value.decode( + case 114: + message.formatRegexp = reader.string(); + break; + case 115: + message.formatRegexpEscapingRule = reader.int32() as any; + break; + case 116: + message.formatRegexpSkipUnmatched = BoolValue.decode( reader, reader.uint32() ).value; break; - case 37: - message.maxAstElements = Int64Value.decode( + case 117: + message.asyncInsert = BoolValue.decode(reader, reader.uint32()).value; + break; + case 118: + message.asyncInsertThreads = Int64Value.decode( reader, reader.uint32() ).value; break; - case 38: - message.maxExpandedAstElements = Int64Value.decode( + case 119: + message.waitForAsyncInsert = BoolValue.decode( reader, reader.uint32() ).value; break; - case 84: - message.minExecutionSpeed = Int64Value.decode( + case 120: + message.waitForAsyncInsertTimeout = Int64Value.decode( reader, reader.uint32() ).value; break; - case 85: - message.minExecutionSpeedBytes = Int64Value.decode( + case 121: + message.asyncInsertMaxDataSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 86: - message.countDistinctImplementation = reader.int32() as any; - break; - case 61: - message.inputFormatValuesInterpretExpressions = BoolValue.decode( + case 122: + message.asyncInsertBusyTimeout = Int64Value.decode( reader, reader.uint32() ).value; break; - case 62: - message.inputFormatDefaultsForOmittedFields = BoolValue.decode( + case 123: + message.asyncInsertStaleTimeout = 
Int64Value.decode( reader, reader.uint32() ).value; break; - case 63: - message.outputFormatJsonQuote64bitIntegers = BoolValue.decode( + case 124: + message.memoryProfilerStep = Int64Value.decode( reader, reader.uint32() ).value; break; - case 64: - message.outputFormatJsonQuoteDenormals = BoolValue.decode( + case 125: + message.memoryProfilerSampleProbability = DoubleValue.decode( reader, reader.uint32() ).value; break; - case 78: - message.lowCardinalityAllowInNativeFormat = BoolValue.decode( + case 126: + message.maxFinalThreads = Int64Value.decode( reader, reader.uint32() ).value; break; - case 79: - message.emptyResultForAggregationByEmptySet = BoolValue.decode( + case 127: + message.inputFormatParallelParsing = BoolValue.decode( reader, reader.uint32() ).value; break; - case 93: - message.joinedSubqueryRequiresAlias = BoolValue.decode( + case 128: + message.inputFormatImportNestedJson = BoolValue.decode( reader, reader.uint32() ).value; break; - case 94: - message.joinUseNulls = BoolValue.decode( + case 129: + message.localFilesystemReadMethod = reader.int32() as any; + break; + case 130: + message.maxReadBufferSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 95: - message.transformNullIn = BoolValue.decode( + case 131: + message.insertKeeperMaxRetries = Int64Value.decode( reader, reader.uint32() ).value; break; - case 65: - message.httpConnectionTimeout = Int64Value.decode( + case 132: + message.maxTemporaryDataOnDiskSizeForUser = Int64Value.decode( reader, reader.uint32() ).value; break; - case 66: - message.httpReceiveTimeout = Int64Value.decode( + case 133: + message.maxTemporaryDataOnDiskSizeForQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 67: - message.httpSendTimeout = Int64Value.decode( + case 134: + message.maxParserDepth = Int64Value.decode( reader, reader.uint32() ).value; break; - case 68: - message.enableHttpCompression = BoolValue.decode( + case 135: + message.remoteFilesystemReadMethod = 
reader.int32() as any; + break; + case 136: + message.memoryOvercommitRatioDenominator = Int64Value.decode( reader, reader.uint32() ).value; break; - case 69: - message.sendProgressInHttpHeaders = BoolValue.decode( + case 137: + message.memoryOvercommitRatioDenominatorForUser = Int64Value.decode( reader, reader.uint32() ).value; break; - case 70: - message.httpHeadersProgressInterval = Int64Value.decode( + case 138: + message.memoryUsageOvercommitMaxWaitMicroseconds = Int64Value.decode( reader, reader.uint32() ).value; break; - case 71: - message.addHttpCorsHeader = BoolValue.decode( + case 44: + message.compile = BoolValue.decode(reader, reader.uint32()).value; + break; + case 45: + message.minCountToCompile = Int64Value.decode( reader, reader.uint32() ).value; break; - case 80: - message.quotaMode = reader.int32() as any; - break; default: reader.skipType(tag & 7); break; @@ -2602,14 +3736,20 @@ export const UserSettings = { object.allowDdl !== undefined && object.allowDdl !== null ? Boolean(object.allowDdl) : undefined; - message.insertQuorum = - object.insertQuorum !== undefined && object.insertQuorum !== null - ? Number(object.insertQuorum) + message.allowIntrospectionFunctions = + object.allowIntrospectionFunctions !== undefined && + object.allowIntrospectionFunctions !== null + ? Boolean(object.allowIntrospectionFunctions) : undefined; message.connectTimeout = object.connectTimeout !== undefined && object.connectTimeout !== null ? Number(object.connectTimeout) : undefined; + message.connectTimeoutWithFailover = + object.connectTimeoutWithFailover !== undefined && + object.connectTimeoutWithFailover !== null + ? Number(object.connectTimeoutWithFailover) + : undefined; message.receiveTimeout = object.receiveTimeout !== undefined && object.receiveTimeout !== null ? Number(object.receiveTimeout) @@ -2618,16 +3758,45 @@ export const UserSettings = { object.sendTimeout !== undefined && object.sendTimeout !== null ? 
Number(object.sendTimeout) : undefined; + message.timeoutBeforeCheckingExecutionSpeed = + object.timeoutBeforeCheckingExecutionSpeed !== undefined && + object.timeoutBeforeCheckingExecutionSpeed !== null + ? Number(object.timeoutBeforeCheckingExecutionSpeed) + : undefined; + message.insertQuorum = + object.insertQuorum !== undefined && object.insertQuorum !== null + ? Number(object.insertQuorum) + : undefined; message.insertQuorumTimeout = object.insertQuorumTimeout !== undefined && object.insertQuorumTimeout !== null ? Number(object.insertQuorumTimeout) : undefined; + message.insertQuorumParallel = + object.insertQuorumParallel !== undefined && + object.insertQuorumParallel !== null + ? Boolean(object.insertQuorumParallel) + : undefined; + message.insertNullAsDefault = + object.insertNullAsDefault !== undefined && + object.insertNullAsDefault !== null + ? Boolean(object.insertNullAsDefault) + : undefined; message.selectSequentialConsistency = object.selectSequentialConsistency !== undefined && object.selectSequentialConsistency !== null ? Boolean(object.selectSequentialConsistency) : undefined; + message.deduplicateBlocksInDependentMaterializedViews = + object.deduplicateBlocksInDependentMaterializedViews !== undefined && + object.deduplicateBlocksInDependentMaterializedViews !== null + ? Boolean(object.deduplicateBlocksInDependentMaterializedViews) + : undefined; + message.replicationAlterPartitionsSync = + object.replicationAlterPartitionsSync !== undefined && + object.replicationAlterPartitionsSync !== null + ? Number(object.replicationAlterPartitionsSync) + : undefined; message.maxReplicaDelayForDistributedQueries = object.maxReplicaDelayForDistributedQueries !== undefined && object.maxReplicaDelayForDistributedQueries !== null @@ -2638,11 +3807,6 @@ export const UserSettings = { object.fallbackToStaleReplicasForDistributedQueries !== null ? 
Boolean(object.fallbackToStaleReplicasForDistributedQueries) : undefined; - message.replicationAlterPartitionsSync = - object.replicationAlterPartitionsSync !== undefined && - object.replicationAlterPartitionsSync !== null - ? Number(object.replicationAlterPartitionsSync) - : undefined; message.distributedProductMode = object.distributedProductMode !== undefined && object.distributedProductMode !== null @@ -2665,15 +3829,6 @@ export const UserSettings = { object.skipUnavailableShards !== null ? Boolean(object.skipUnavailableShards) : undefined; - message.compile = - object.compile !== undefined && object.compile !== null - ? Boolean(object.compile) - : undefined; - message.minCountToCompile = - object.minCountToCompile !== undefined && - object.minCountToCompile !== null - ? Number(object.minCountToCompile) - : undefined; message.compileExpressions = object.compileExpressions !== undefined && object.compileExpressions !== null @@ -2780,6 +3935,16 @@ export const UserSettings = { object.maxNetworkBandwidthForUser !== null ? Number(object.maxNetworkBandwidthForUser) : undefined; + message.maxPartitionsPerInsertBlock = + object.maxPartitionsPerInsertBlock !== undefined && + object.maxPartitionsPerInsertBlock !== null + ? Number(object.maxPartitionsPerInsertBlock) + : undefined; + message.maxConcurrentQueriesForUser = + object.maxConcurrentQueriesForUser !== undefined && + object.maxConcurrentQueriesForUser !== null + ? Number(object.maxConcurrentQueriesForUser) + : undefined; message.forceIndexByDate = object.forceIndexByDate !== undefined && object.forceIndexByDate !== null ? Boolean(object.forceIndexByDate) @@ -2897,6 +4062,14 @@ export const UserSettings = { object.joinOverflowMode !== undefined && object.joinOverflowMode !== null ? userSettings_OverflowModeFromJSON(object.joinOverflowMode) : 0; + message.joinAlgorithm = (object.joinAlgorithm ?? 
[]).map((e: any) => + userSettings_JoinAlgorithmFromJSON(e) + ); + message.anyJoinDistinctRightTableKeys = + object.anyJoinDistinctRightTableKeys !== undefined && + object.anyJoinDistinctRightTableKeys !== null + ? Boolean(object.anyJoinDistinctRightTableKeys) + : undefined; message.maxColumnsToRead = object.maxColumnsToRead !== undefined && object.maxColumnsToRead !== null ? Number(object.maxColumnsToRead) @@ -2955,6 +4128,21 @@ export const UserSettings = { object.inputFormatDefaultsForOmittedFields !== null ? Boolean(object.inputFormatDefaultsForOmittedFields) : undefined; + message.inputFormatNullAsDefault = + object.inputFormatNullAsDefault !== undefined && + object.inputFormatNullAsDefault !== null + ? Boolean(object.inputFormatNullAsDefault) + : undefined; + message.dateTimeInputFormat = + object.dateTimeInputFormat !== undefined && + object.dateTimeInputFormat !== null + ? userSettings_DateTimeInputFormatFromJSON(object.dateTimeInputFormat) + : 0; + message.inputFormatWithNamesUseHeader = + object.inputFormatWithNamesUseHeader !== undefined && + object.inputFormatWithNamesUseHeader !== null + ? Boolean(object.inputFormatWithNamesUseHeader) + : undefined; message.outputFormatJsonQuote64bitIntegers = object.outputFormatJsonQuote_64bitIntegers !== undefined && object.outputFormatJsonQuote_64bitIntegers !== null @@ -2965,29 +4153,26 @@ export const UserSettings = { object.outputFormatJsonQuoteDenormals !== null ? Boolean(object.outputFormatJsonQuoteDenormals) : undefined; + message.dateTimeOutputFormat = + object.dateTimeOutputFormat !== undefined && + object.dateTimeOutputFormat !== null + ? userSettings_DateTimeOutputFormatFromJSON(object.dateTimeOutputFormat) + : 0; message.lowCardinalityAllowInNativeFormat = object.lowCardinalityAllowInNativeFormat !== undefined && object.lowCardinalityAllowInNativeFormat !== null ? 
Boolean(object.lowCardinalityAllowInNativeFormat) : undefined; + message.allowSuspiciousLowCardinalityTypes = + object.allowSuspiciousLowCardinalityTypes !== undefined && + object.allowSuspiciousLowCardinalityTypes !== null + ? Boolean(object.allowSuspiciousLowCardinalityTypes) + : undefined; message.emptyResultForAggregationByEmptySet = object.emptyResultForAggregationByEmptySet !== undefined && object.emptyResultForAggregationByEmptySet !== null ? Boolean(object.emptyResultForAggregationByEmptySet) : undefined; - message.joinedSubqueryRequiresAlias = - object.joinedSubqueryRequiresAlias !== undefined && - object.joinedSubqueryRequiresAlias !== null - ? Boolean(object.joinedSubqueryRequiresAlias) - : undefined; - message.joinUseNulls = - object.joinUseNulls !== undefined && object.joinUseNulls !== null - ? Boolean(object.joinUseNulls) - : undefined; - message.transformNullIn = - object.transformNullIn !== undefined && object.transformNullIn !== null - ? Boolean(object.transformNullIn) - : undefined; message.httpConnectionTimeout = object.httpConnectionTimeout !== undefined && object.httpConnectionTimeout !== null @@ -3022,10 +4207,173 @@ export const UserSettings = { object.addHttpCorsHeader !== null ? Boolean(object.addHttpCorsHeader) : undefined; + message.cancelHttpReadonlyQueriesOnClientClose = + object.cancelHttpReadonlyQueriesOnClientClose !== undefined && + object.cancelHttpReadonlyQueriesOnClientClose !== null + ? Boolean(object.cancelHttpReadonlyQueriesOnClientClose) + : undefined; + message.maxHttpGetRedirects = + object.maxHttpGetRedirects !== undefined && + object.maxHttpGetRedirects !== null + ? Number(object.maxHttpGetRedirects) + : undefined; + message.joinedSubqueryRequiresAlias = + object.joinedSubqueryRequiresAlias !== undefined && + object.joinedSubqueryRequiresAlias !== null + ? Boolean(object.joinedSubqueryRequiresAlias) + : undefined; + message.joinUseNulls = + object.joinUseNulls !== undefined && object.joinUseNulls !== null + ? 
Boolean(object.joinUseNulls) + : undefined; + message.transformNullIn = + object.transformNullIn !== undefined && object.transformNullIn !== null + ? Boolean(object.transformNullIn) + : undefined; message.quotaMode = object.quotaMode !== undefined && object.quotaMode !== null ? userSettings_QuotaModeFromJSON(object.quotaMode) : 0; + message.flattenNested = + object.flattenNested !== undefined && object.flattenNested !== null + ? Boolean(object.flattenNested) + : undefined; + message.formatRegexp = + object.formatRegexp !== undefined && object.formatRegexp !== null + ? String(object.formatRegexp) + : ""; + message.formatRegexpEscapingRule = + object.formatRegexpEscapingRule !== undefined && + object.formatRegexpEscapingRule !== null + ? userSettings_FormatRegexpEscapingRuleFromJSON( + object.formatRegexpEscapingRule + ) + : 0; + message.formatRegexpSkipUnmatched = + object.formatRegexpSkipUnmatched !== undefined && + object.formatRegexpSkipUnmatched !== null + ? Boolean(object.formatRegexpSkipUnmatched) + : undefined; + message.asyncInsert = + object.asyncInsert !== undefined && object.asyncInsert !== null + ? Boolean(object.asyncInsert) + : undefined; + message.asyncInsertThreads = + object.asyncInsertThreads !== undefined && + object.asyncInsertThreads !== null + ? Number(object.asyncInsertThreads) + : undefined; + message.waitForAsyncInsert = + object.waitForAsyncInsert !== undefined && + object.waitForAsyncInsert !== null + ? Boolean(object.waitForAsyncInsert) + : undefined; + message.waitForAsyncInsertTimeout = + object.waitForAsyncInsertTimeout !== undefined && + object.waitForAsyncInsertTimeout !== null + ? Number(object.waitForAsyncInsertTimeout) + : undefined; + message.asyncInsertMaxDataSize = + object.asyncInsertMaxDataSize !== undefined && + object.asyncInsertMaxDataSize !== null + ? 
Number(object.asyncInsertMaxDataSize) + : undefined; + message.asyncInsertBusyTimeout = + object.asyncInsertBusyTimeout !== undefined && + object.asyncInsertBusyTimeout !== null + ? Number(object.asyncInsertBusyTimeout) + : undefined; + message.asyncInsertStaleTimeout = + object.asyncInsertStaleTimeout !== undefined && + object.asyncInsertStaleTimeout !== null + ? Number(object.asyncInsertStaleTimeout) + : undefined; + message.memoryProfilerStep = + object.memoryProfilerStep !== undefined && + object.memoryProfilerStep !== null + ? Number(object.memoryProfilerStep) + : undefined; + message.memoryProfilerSampleProbability = + object.memoryProfilerSampleProbability !== undefined && + object.memoryProfilerSampleProbability !== null + ? Number(object.memoryProfilerSampleProbability) + : undefined; + message.maxFinalThreads = + object.maxFinalThreads !== undefined && object.maxFinalThreads !== null + ? Number(object.maxFinalThreads) + : undefined; + message.inputFormatParallelParsing = + object.inputFormatParallelParsing !== undefined && + object.inputFormatParallelParsing !== null + ? Boolean(object.inputFormatParallelParsing) + : undefined; + message.inputFormatImportNestedJson = + object.inputFormatImportNestedJson !== undefined && + object.inputFormatImportNestedJson !== null + ? Boolean(object.inputFormatImportNestedJson) + : undefined; + message.localFilesystemReadMethod = + object.localFilesystemReadMethod !== undefined && + object.localFilesystemReadMethod !== null + ? userSettings_LocalFilesystemReadMethodFromJSON( + object.localFilesystemReadMethod + ) + : 0; + message.maxReadBufferSize = + object.maxReadBufferSize !== undefined && + object.maxReadBufferSize !== null + ? Number(object.maxReadBufferSize) + : undefined; + message.insertKeeperMaxRetries = + object.insertKeeperMaxRetries !== undefined && + object.insertKeeperMaxRetries !== null + ? 
Number(object.insertKeeperMaxRetries) + : undefined; + message.maxTemporaryDataOnDiskSizeForUser = + object.maxTemporaryDataOnDiskSizeForUser !== undefined && + object.maxTemporaryDataOnDiskSizeForUser !== null + ? Number(object.maxTemporaryDataOnDiskSizeForUser) + : undefined; + message.maxTemporaryDataOnDiskSizeForQuery = + object.maxTemporaryDataOnDiskSizeForQuery !== undefined && + object.maxTemporaryDataOnDiskSizeForQuery !== null + ? Number(object.maxTemporaryDataOnDiskSizeForQuery) + : undefined; + message.maxParserDepth = + object.maxParserDepth !== undefined && object.maxParserDepth !== null + ? Number(object.maxParserDepth) + : undefined; + message.remoteFilesystemReadMethod = + object.remoteFilesystemReadMethod !== undefined && + object.remoteFilesystemReadMethod !== null + ? userSettings_RemoteFilesystemReadMethodFromJSON( + object.remoteFilesystemReadMethod + ) + : 0; + message.memoryOvercommitRatioDenominator = + object.memoryOvercommitRatioDenominator !== undefined && + object.memoryOvercommitRatioDenominator !== null + ? Number(object.memoryOvercommitRatioDenominator) + : undefined; + message.memoryOvercommitRatioDenominatorForUser = + object.memoryOvercommitRatioDenominatorForUser !== undefined && + object.memoryOvercommitRatioDenominatorForUser !== null + ? Number(object.memoryOvercommitRatioDenominatorForUser) + : undefined; + message.memoryUsageOvercommitMaxWaitMicroseconds = + object.memoryUsageOvercommitMaxWaitMicroseconds !== undefined && + object.memoryUsageOvercommitMaxWaitMicroseconds !== null + ? Number(object.memoryUsageOvercommitMaxWaitMicroseconds) + : undefined; + message.compile = + object.compile !== undefined && object.compile !== null + ? Boolean(object.compile) + : undefined; + message.minCountToCompile = + object.minCountToCompile !== undefined && + object.minCountToCompile !== null + ? 
Number(object.minCountToCompile) + : undefined; return message; }, @@ -3033,27 +4381,41 @@ export const UserSettings = { const obj: any = {}; message.readonly !== undefined && (obj.readonly = message.readonly); message.allowDdl !== undefined && (obj.allowDdl = message.allowDdl); - message.insertQuorum !== undefined && - (obj.insertQuorum = message.insertQuorum); + message.allowIntrospectionFunctions !== undefined && + (obj.allowIntrospectionFunctions = message.allowIntrospectionFunctions); message.connectTimeout !== undefined && (obj.connectTimeout = message.connectTimeout); + message.connectTimeoutWithFailover !== undefined && + (obj.connectTimeoutWithFailover = message.connectTimeoutWithFailover); message.receiveTimeout !== undefined && (obj.receiveTimeout = message.receiveTimeout); message.sendTimeout !== undefined && (obj.sendTimeout = message.sendTimeout); + message.timeoutBeforeCheckingExecutionSpeed !== undefined && + (obj.timeoutBeforeCheckingExecutionSpeed = + message.timeoutBeforeCheckingExecutionSpeed); + message.insertQuorum !== undefined && + (obj.insertQuorum = message.insertQuorum); message.insertQuorumTimeout !== undefined && (obj.insertQuorumTimeout = message.insertQuorumTimeout); + message.insertQuorumParallel !== undefined && + (obj.insertQuorumParallel = message.insertQuorumParallel); + message.insertNullAsDefault !== undefined && + (obj.insertNullAsDefault = message.insertNullAsDefault); message.selectSequentialConsistency !== undefined && (obj.selectSequentialConsistency = message.selectSequentialConsistency); + message.deduplicateBlocksInDependentMaterializedViews !== undefined && + (obj.deduplicateBlocksInDependentMaterializedViews = + message.deduplicateBlocksInDependentMaterializedViews); + message.replicationAlterPartitionsSync !== undefined && + (obj.replicationAlterPartitionsSync = + message.replicationAlterPartitionsSync); message.maxReplicaDelayForDistributedQueries !== undefined && (obj.maxReplicaDelayForDistributedQueries = 
message.maxReplicaDelayForDistributedQueries); message.fallbackToStaleReplicasForDistributedQueries !== undefined && (obj.fallbackToStaleReplicasForDistributedQueries = message.fallbackToStaleReplicasForDistributedQueries); - message.replicationAlterPartitionsSync !== undefined && - (obj.replicationAlterPartitionsSync = - message.replicationAlterPartitionsSync); message.distributedProductMode !== undefined && (obj.distributedProductMode = userSettings_DistributedProductModeToJSON( message.distributedProductMode @@ -3065,9 +4427,6 @@ export const UserSettings = { (obj.distributedDdlTaskTimeout = message.distributedDdlTaskTimeout); message.skipUnavailableShards !== undefined && (obj.skipUnavailableShards = message.skipUnavailableShards); - message.compile !== undefined && (obj.compile = message.compile); - message.minCountToCompile !== undefined && - (obj.minCountToCompile = message.minCountToCompile); message.compileExpressions !== undefined && (obj.compileExpressions = message.compileExpressions); message.minCountToCompileExpression !== undefined && @@ -3114,6 +4473,10 @@ export const UserSettings = { (obj.maxNetworkBandwidth = message.maxNetworkBandwidth); message.maxNetworkBandwidthForUser !== undefined && (obj.maxNetworkBandwidthForUser = message.maxNetworkBandwidthForUser); + message.maxPartitionsPerInsertBlock !== undefined && + (obj.maxPartitionsPerInsertBlock = message.maxPartitionsPerInsertBlock); + message.maxConcurrentQueriesForUser !== undefined && + (obj.maxConcurrentQueriesForUser = message.maxConcurrentQueriesForUser); message.forceIndexByDate !== undefined && (obj.forceIndexByDate = message.forceIndexByDate); message.forcePrimaryKey !== undefined && @@ -3186,6 +4549,16 @@ export const UserSettings = { (obj.joinOverflowMode = userSettings_OverflowModeToJSON( message.joinOverflowMode )); + if (message.joinAlgorithm) { + obj.joinAlgorithm = message.joinAlgorithm.map((e) => + userSettings_JoinAlgorithmToJSON(e) + ); + } else { + obj.joinAlgorithm = []; + 
} + message.anyJoinDistinctRightTableKeys !== undefined && + (obj.anyJoinDistinctRightTableKeys = + message.anyJoinDistinctRightTableKeys); message.maxColumnsToRead !== undefined && (obj.maxColumnsToRead = message.maxColumnsToRead); message.maxTemporaryColumns !== undefined && @@ -3215,24 +4588,34 @@ export const UserSettings = { message.inputFormatDefaultsForOmittedFields !== undefined && (obj.inputFormatDefaultsForOmittedFields = message.inputFormatDefaultsForOmittedFields); + message.inputFormatNullAsDefault !== undefined && + (obj.inputFormatNullAsDefault = message.inputFormatNullAsDefault); + message.dateTimeInputFormat !== undefined && + (obj.dateTimeInputFormat = userSettings_DateTimeInputFormatToJSON( + message.dateTimeInputFormat + )); + message.inputFormatWithNamesUseHeader !== undefined && + (obj.inputFormatWithNamesUseHeader = + message.inputFormatWithNamesUseHeader); message.outputFormatJsonQuote64bitIntegers !== undefined && (obj.outputFormatJsonQuote_64bitIntegers = message.outputFormatJsonQuote64bitIntegers); message.outputFormatJsonQuoteDenormals !== undefined && (obj.outputFormatJsonQuoteDenormals = message.outputFormatJsonQuoteDenormals); + message.dateTimeOutputFormat !== undefined && + (obj.dateTimeOutputFormat = userSettings_DateTimeOutputFormatToJSON( + message.dateTimeOutputFormat + )); message.lowCardinalityAllowInNativeFormat !== undefined && (obj.lowCardinalityAllowInNativeFormat = message.lowCardinalityAllowInNativeFormat); + message.allowSuspiciousLowCardinalityTypes !== undefined && + (obj.allowSuspiciousLowCardinalityTypes = + message.allowSuspiciousLowCardinalityTypes); message.emptyResultForAggregationByEmptySet !== undefined && (obj.emptyResultForAggregationByEmptySet = message.emptyResultForAggregationByEmptySet); - message.joinedSubqueryRequiresAlias !== undefined && - (obj.joinedSubqueryRequiresAlias = message.joinedSubqueryRequiresAlias); - message.joinUseNulls !== undefined && - (obj.joinUseNulls = message.joinUseNulls); - 
message.transformNullIn !== undefined && - (obj.transformNullIn = message.transformNullIn); message.httpConnectionTimeout !== undefined && (obj.httpConnectionTimeout = message.httpConnectionTimeout); message.httpReceiveTimeout !== undefined && @@ -3247,8 +4630,89 @@ export const UserSettings = { (obj.httpHeadersProgressInterval = message.httpHeadersProgressInterval); message.addHttpCorsHeader !== undefined && (obj.addHttpCorsHeader = message.addHttpCorsHeader); + message.cancelHttpReadonlyQueriesOnClientClose !== undefined && + (obj.cancelHttpReadonlyQueriesOnClientClose = + message.cancelHttpReadonlyQueriesOnClientClose); + message.maxHttpGetRedirects !== undefined && + (obj.maxHttpGetRedirects = message.maxHttpGetRedirects); + message.joinedSubqueryRequiresAlias !== undefined && + (obj.joinedSubqueryRequiresAlias = message.joinedSubqueryRequiresAlias); + message.joinUseNulls !== undefined && + (obj.joinUseNulls = message.joinUseNulls); + message.transformNullIn !== undefined && + (obj.transformNullIn = message.transformNullIn); message.quotaMode !== undefined && (obj.quotaMode = userSettings_QuotaModeToJSON(message.quotaMode)); + message.flattenNested !== undefined && + (obj.flattenNested = message.flattenNested); + message.formatRegexp !== undefined && + (obj.formatRegexp = message.formatRegexp); + message.formatRegexpEscapingRule !== undefined && + (obj.formatRegexpEscapingRule = + userSettings_FormatRegexpEscapingRuleToJSON( + message.formatRegexpEscapingRule + )); + message.formatRegexpSkipUnmatched !== undefined && + (obj.formatRegexpSkipUnmatched = message.formatRegexpSkipUnmatched); + message.asyncInsert !== undefined && + (obj.asyncInsert = message.asyncInsert); + message.asyncInsertThreads !== undefined && + (obj.asyncInsertThreads = message.asyncInsertThreads); + message.waitForAsyncInsert !== undefined && + (obj.waitForAsyncInsert = message.waitForAsyncInsert); + message.waitForAsyncInsertTimeout !== undefined && + (obj.waitForAsyncInsertTimeout = 
message.waitForAsyncInsertTimeout); + message.asyncInsertMaxDataSize !== undefined && + (obj.asyncInsertMaxDataSize = message.asyncInsertMaxDataSize); + message.asyncInsertBusyTimeout !== undefined && + (obj.asyncInsertBusyTimeout = message.asyncInsertBusyTimeout); + message.asyncInsertStaleTimeout !== undefined && + (obj.asyncInsertStaleTimeout = message.asyncInsertStaleTimeout); + message.memoryProfilerStep !== undefined && + (obj.memoryProfilerStep = message.memoryProfilerStep); + message.memoryProfilerSampleProbability !== undefined && + (obj.memoryProfilerSampleProbability = + message.memoryProfilerSampleProbability); + message.maxFinalThreads !== undefined && + (obj.maxFinalThreads = message.maxFinalThreads); + message.inputFormatParallelParsing !== undefined && + (obj.inputFormatParallelParsing = message.inputFormatParallelParsing); + message.inputFormatImportNestedJson !== undefined && + (obj.inputFormatImportNestedJson = message.inputFormatImportNestedJson); + message.localFilesystemReadMethod !== undefined && + (obj.localFilesystemReadMethod = + userSettings_LocalFilesystemReadMethodToJSON( + message.localFilesystemReadMethod + )); + message.maxReadBufferSize !== undefined && + (obj.maxReadBufferSize = message.maxReadBufferSize); + message.insertKeeperMaxRetries !== undefined && + (obj.insertKeeperMaxRetries = message.insertKeeperMaxRetries); + message.maxTemporaryDataOnDiskSizeForUser !== undefined && + (obj.maxTemporaryDataOnDiskSizeForUser = + message.maxTemporaryDataOnDiskSizeForUser); + message.maxTemporaryDataOnDiskSizeForQuery !== undefined && + (obj.maxTemporaryDataOnDiskSizeForQuery = + message.maxTemporaryDataOnDiskSizeForQuery); + message.maxParserDepth !== undefined && + (obj.maxParserDepth = message.maxParserDepth); + message.remoteFilesystemReadMethod !== undefined && + (obj.remoteFilesystemReadMethod = + userSettings_RemoteFilesystemReadMethodToJSON( + message.remoteFilesystemReadMethod + )); + message.memoryOvercommitRatioDenominator !== 
undefined && + (obj.memoryOvercommitRatioDenominator = + message.memoryOvercommitRatioDenominator); + message.memoryOvercommitRatioDenominatorForUser !== undefined && + (obj.memoryOvercommitRatioDenominatorForUser = + message.memoryOvercommitRatioDenominatorForUser); + message.memoryUsageOvercommitMaxWaitMicroseconds !== undefined && + (obj.memoryUsageOvercommitMaxWaitMicroseconds = + message.memoryUsageOvercommitMaxWaitMicroseconds); + message.compile !== undefined && (obj.compile = message.compile); + message.minCountToCompile !== undefined && + (obj.minCountToCompile = message.minCountToCompile); return obj; }, @@ -3258,27 +4722,35 @@ export const UserSettings = { const message = { ...baseUserSettings } as UserSettings; message.readonly = object.readonly ?? undefined; message.allowDdl = object.allowDdl ?? undefined; - message.insertQuorum = object.insertQuorum ?? undefined; + message.allowIntrospectionFunctions = + object.allowIntrospectionFunctions ?? undefined; message.connectTimeout = object.connectTimeout ?? undefined; + message.connectTimeoutWithFailover = + object.connectTimeoutWithFailover ?? undefined; message.receiveTimeout = object.receiveTimeout ?? undefined; message.sendTimeout = object.sendTimeout ?? undefined; + message.timeoutBeforeCheckingExecutionSpeed = + object.timeoutBeforeCheckingExecutionSpeed ?? undefined; + message.insertQuorum = object.insertQuorum ?? undefined; message.insertQuorumTimeout = object.insertQuorumTimeout ?? undefined; + message.insertQuorumParallel = object.insertQuorumParallel ?? undefined; + message.insertNullAsDefault = object.insertNullAsDefault ?? undefined; message.selectSequentialConsistency = object.selectSequentialConsistency ?? undefined; + message.deduplicateBlocksInDependentMaterializedViews = + object.deduplicateBlocksInDependentMaterializedViews ?? undefined; + message.replicationAlterPartitionsSync = + object.replicationAlterPartitionsSync ?? 
undefined; message.maxReplicaDelayForDistributedQueries = object.maxReplicaDelayForDistributedQueries ?? undefined; message.fallbackToStaleReplicasForDistributedQueries = object.fallbackToStaleReplicasForDistributedQueries ?? undefined; - message.replicationAlterPartitionsSync = - object.replicationAlterPartitionsSync ?? undefined; message.distributedProductMode = object.distributedProductMode ?? 0; message.distributedAggregationMemoryEfficient = object.distributedAggregationMemoryEfficient ?? undefined; message.distributedDdlTaskTimeout = object.distributedDdlTaskTimeout ?? undefined; message.skipUnavailableShards = object.skipUnavailableShards ?? undefined; - message.compile = object.compile ?? undefined; - message.minCountToCompile = object.minCountToCompile ?? undefined; message.compileExpressions = object.compileExpressions ?? undefined; message.minCountToCompileExpression = object.minCountToCompileExpression ?? undefined; @@ -3312,6 +4784,10 @@ export const UserSettings = { message.maxNetworkBandwidth = object.maxNetworkBandwidth ?? undefined; message.maxNetworkBandwidthForUser = object.maxNetworkBandwidthForUser ?? undefined; + message.maxPartitionsPerInsertBlock = + object.maxPartitionsPerInsertBlock ?? undefined; + message.maxConcurrentQueriesForUser = + object.maxConcurrentQueriesForUser ?? undefined; message.forceIndexByDate = object.forceIndexByDate ?? undefined; message.forcePrimaryKey = object.forcePrimaryKey ?? undefined; message.maxRowsToRead = object.maxRowsToRead ?? undefined; @@ -3339,6 +4815,9 @@ export const UserSettings = { message.maxRowsInJoin = object.maxRowsInJoin ?? undefined; message.maxBytesInJoin = object.maxBytesInJoin ?? undefined; message.joinOverflowMode = object.joinOverflowMode ?? 0; + message.joinAlgorithm = object.joinAlgorithm?.map((e) => e) || []; + message.anyJoinDistinctRightTableKeys = + object.anyJoinDistinctRightTableKeys ?? undefined; message.maxColumnsToRead = object.maxColumnsToRead ?? 
undefined; message.maxTemporaryColumns = object.maxTemporaryColumns ?? undefined; message.maxTemporaryNonConstColumns = @@ -3355,18 +4834,22 @@ export const UserSettings = { object.inputFormatValuesInterpretExpressions ?? undefined; message.inputFormatDefaultsForOmittedFields = object.inputFormatDefaultsForOmittedFields ?? undefined; + message.inputFormatNullAsDefault = + object.inputFormatNullAsDefault ?? undefined; + message.dateTimeInputFormat = object.dateTimeInputFormat ?? 0; + message.inputFormatWithNamesUseHeader = + object.inputFormatWithNamesUseHeader ?? undefined; message.outputFormatJsonQuote64bitIntegers = object.outputFormatJsonQuote64bitIntegers ?? undefined; message.outputFormatJsonQuoteDenormals = object.outputFormatJsonQuoteDenormals ?? undefined; + message.dateTimeOutputFormat = object.dateTimeOutputFormat ?? 0; message.lowCardinalityAllowInNativeFormat = object.lowCardinalityAllowInNativeFormat ?? undefined; + message.allowSuspiciousLowCardinalityTypes = + object.allowSuspiciousLowCardinalityTypes ?? undefined; message.emptyResultForAggregationByEmptySet = object.emptyResultForAggregationByEmptySet ?? undefined; - message.joinedSubqueryRequiresAlias = - object.joinedSubqueryRequiresAlias ?? undefined; - message.joinUseNulls = object.joinUseNulls ?? undefined; - message.transformNullIn = object.transformNullIn ?? undefined; message.httpConnectionTimeout = object.httpConnectionTimeout ?? undefined; message.httpReceiveTimeout = object.httpReceiveTimeout ?? undefined; message.httpSendTimeout = object.httpSendTimeout ?? undefined; @@ -3376,7 +4859,53 @@ export const UserSettings = { message.httpHeadersProgressInterval = object.httpHeadersProgressInterval ?? undefined; message.addHttpCorsHeader = object.addHttpCorsHeader ?? undefined; + message.cancelHttpReadonlyQueriesOnClientClose = + object.cancelHttpReadonlyQueriesOnClientClose ?? undefined; + message.maxHttpGetRedirects = object.maxHttpGetRedirects ?? 
undefined; + message.joinedSubqueryRequiresAlias = + object.joinedSubqueryRequiresAlias ?? undefined; + message.joinUseNulls = object.joinUseNulls ?? undefined; + message.transformNullIn = object.transformNullIn ?? undefined; message.quotaMode = object.quotaMode ?? 0; + message.flattenNested = object.flattenNested ?? undefined; + message.formatRegexp = object.formatRegexp ?? ""; + message.formatRegexpEscapingRule = object.formatRegexpEscapingRule ?? 0; + message.formatRegexpSkipUnmatched = + object.formatRegexpSkipUnmatched ?? undefined; + message.asyncInsert = object.asyncInsert ?? undefined; + message.asyncInsertThreads = object.asyncInsertThreads ?? undefined; + message.waitForAsyncInsert = object.waitForAsyncInsert ?? undefined; + message.waitForAsyncInsertTimeout = + object.waitForAsyncInsertTimeout ?? undefined; + message.asyncInsertMaxDataSize = object.asyncInsertMaxDataSize ?? undefined; + message.asyncInsertBusyTimeout = object.asyncInsertBusyTimeout ?? undefined; + message.asyncInsertStaleTimeout = + object.asyncInsertStaleTimeout ?? undefined; + message.memoryProfilerStep = object.memoryProfilerStep ?? undefined; + message.memoryProfilerSampleProbability = + object.memoryProfilerSampleProbability ?? undefined; + message.maxFinalThreads = object.maxFinalThreads ?? undefined; + message.inputFormatParallelParsing = + object.inputFormatParallelParsing ?? undefined; + message.inputFormatImportNestedJson = + object.inputFormatImportNestedJson ?? undefined; + message.localFilesystemReadMethod = object.localFilesystemReadMethod ?? 0; + message.maxReadBufferSize = object.maxReadBufferSize ?? undefined; + message.insertKeeperMaxRetries = object.insertKeeperMaxRetries ?? undefined; + message.maxTemporaryDataOnDiskSizeForUser = + object.maxTemporaryDataOnDiskSizeForUser ?? undefined; + message.maxTemporaryDataOnDiskSizeForQuery = + object.maxTemporaryDataOnDiskSizeForQuery ?? undefined; + message.maxParserDepth = object.maxParserDepth ?? 
undefined; + message.remoteFilesystemReadMethod = object.remoteFilesystemReadMethod ?? 0; + message.memoryOvercommitRatioDenominator = + object.memoryOvercommitRatioDenominator ?? undefined; + message.memoryOvercommitRatioDenominatorForUser = + object.memoryOvercommitRatioDenominatorForUser ?? undefined; + message.memoryUsageOvercommitMaxWaitMicroseconds = + object.memoryUsageOvercommitMaxWaitMicroseconds ?? undefined; + message.compile = object.compile ?? undefined; + message.minCountToCompile = object.minCountToCompile ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts index 570c23bd..a502e06b 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts @@ -54,6 +54,20 @@ export interface ListBackupsResponse { nextPageToken: string; } +export interface DeleteBackupRequest { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupRequest"; + /** Required. ID of the backup to delete. */ + backupId: string; +} + +export interface DeleteBackupMetadata { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupMetadata"; + /** Required. ID of the Greenplum backup that is currently being deleted. */ + backupId: string; + /** ID of the Greenplum backup that is being created. 
*/ + clusterId: string; +} + const baseGetBackupRequest: object = { $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest", backupId: "", @@ -286,6 +300,146 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); +const baseDeleteBackupRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupRequest", + backupId: "", +}; + +export const DeleteBackupRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupRequest" as const, + + encode( + message: DeleteBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupRequest.$type, DeleteBackupRequest); + +const baseDeleteBackupMetadata: object = { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupMetadata", + backupId: "", + clusterId: "", +}; + +export const DeleteBackupMetadata = { + $type: "yandex.cloud.mdb.greenplum.v1.DeleteBackupMetadata" as const, + + encode( + message: DeleteBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupMetadata): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = object.backupId ?? ""; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupMetadata.$type, DeleteBackupMetadata); + /** A set of methods for managing backups. */ export const BackupServiceService = { /** Returns the specified backup of Greenplum® cluster. */ diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts index 53bdc513..1fad603c 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts @@ -6,13 +6,18 @@ import { MasterSubclusterConfig, SegmentSubclusterConfig, ConnectionPoolerConfigSet, + BackgroundActivitiesConfig, Greenplumconfigset617, Greenplumconfigset619, + Greenplumconfigset621, + Greenplumconfigset622, + GreenplumConfigSet6, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; import { MaintenanceWindow, MaintenanceOperation, } from "../../../../../yandex/cloud/mdb/greenplum/v1/maintenance"; +import { PXFConfigSet } from "../../../../../yandex/cloud/mdb/greenplum/v1/pxf"; import { TimeOfDay } from "../../../../../google/type/timeofday"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; @@ -75,6 +80,8 @@ export interface Cluster { hostGroupIds: string[]; /** Greenplum® and Odyssey® configuration. 
*/ clusterConfig?: ClusterConfigSet; + /** Cloud storage settings */ + cloudStorage?: CloudStorage; } export enum Cluster_Environment { @@ -258,8 +265,13 @@ export interface ClusterConfigSet { $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet"; greenplumConfigSet617?: Greenplumconfigset617 | undefined; greenplumConfigSet619?: Greenplumconfigset619 | undefined; + greenplumConfigSet621?: Greenplumconfigset621 | undefined; + greenplumConfigSet622?: Greenplumconfigset622 | undefined; + greenplumConfigSet6?: GreenplumConfigSet6 | undefined; /** Odyssey® pool settings. */ pool?: ConnectionPoolerConfigSet; + backgroundActivities?: BackgroundActivitiesConfig; + pxfConfig?: PXFConfigSet; } /** Monitoring system metadata. */ @@ -345,6 +357,13 @@ export interface RestoreResources { diskSize: number; } +/** Cloud Storage Settings */ +export interface CloudStorage { + $type: "yandex.cloud.mdb.greenplum.v1.CloudStorage"; + /** enable Cloud Storage for cluster */ + enable: boolean; +} + const baseCluster: object = { $type: "yandex.cloud.mdb.greenplum.v1.Cluster", id: "", @@ -468,6 +487,12 @@ export const Cluster = { writer.uint32(194).fork() ).ldelim(); } + if (message.cloudStorage !== undefined) { + CloudStorage.encode( + message.cloudStorage, + writer.uint32(210).fork() + ).ldelim(); + } return writer; }, @@ -574,6 +599,9 @@ export const Cluster = { reader.uint32() ); break; + case 26: + message.cloudStorage = CloudStorage.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -679,6 +707,10 @@ export const Cluster = { object.clusterConfig !== undefined && object.clusterConfig !== null ? ClusterConfigSet.fromJSON(object.clusterConfig) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? CloudStorage.fromJSON(object.cloudStorage) + : undefined; return message; }, @@ -754,6 +786,10 @@ export const Cluster = { (obj.clusterConfig = message.clusterConfig ? 
ClusterConfigSet.toJSON(message.clusterConfig) : undefined); + message.cloudStorage !== undefined && + (obj.cloudStorage = message.cloudStorage + ? CloudStorage.toJSON(message.cloudStorage) + : undefined); return obj; }, @@ -810,6 +846,10 @@ export const Cluster = { object.clusterConfig !== undefined && object.clusterConfig !== null ? ClusterConfigSet.fromPartial(object.clusterConfig) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? CloudStorage.fromPartial(object.cloudStorage) + : undefined; return message; }, }; @@ -912,12 +952,39 @@ export const ClusterConfigSet = { writer.uint32(18).fork() ).ldelim(); } + if (message.greenplumConfigSet621 !== undefined) { + Greenplumconfigset621.encode( + message.greenplumConfigSet621, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.greenplumConfigSet622 !== undefined) { + Greenplumconfigset622.encode( + message.greenplumConfigSet622, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.greenplumConfigSet6 !== undefined) { + GreenplumConfigSet6.encode( + message.greenplumConfigSet6, + writer.uint32(74).fork() + ).ldelim(); + } if (message.pool !== undefined) { ConnectionPoolerConfigSet.encode( message.pool, writer.uint32(26).fork() ).ldelim(); } + if (message.backgroundActivities !== undefined) { + BackgroundActivitiesConfig.encode( + message.backgroundActivities, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.pxfConfig !== undefined) { + PXFConfigSet.encode(message.pxfConfig, writer.uint32(66).fork()).ldelim(); + } return writer; }, @@ -940,12 +1007,39 @@ export const ClusterConfigSet = { reader.uint32() ); break; + case 4: + message.greenplumConfigSet621 = Greenplumconfigset621.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.greenplumConfigSet622 = Greenplumconfigset622.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.greenplumConfigSet6 = GreenplumConfigSet6.decode( + reader, + reader.uint32() 
+ ); + break; case 3: message.pool = ConnectionPoolerConfigSet.decode( reader, reader.uint32() ); break; + case 6: + message.backgroundActivities = BackgroundActivitiesConfig.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.pxfConfig = PXFConfigSet.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -966,10 +1060,34 @@ export const ClusterConfigSet = { object.greenplumConfigSet_6_19 !== null ? Greenplumconfigset619.fromJSON(object.greenplumConfigSet_6_19) : undefined; + message.greenplumConfigSet621 = + object.greenplumConfigSet_6_21 !== undefined && + object.greenplumConfigSet_6_21 !== null + ? Greenplumconfigset621.fromJSON(object.greenplumConfigSet_6_21) + : undefined; + message.greenplumConfigSet622 = + object.greenplumConfigSet_6_22 !== undefined && + object.greenplumConfigSet_6_22 !== null + ? Greenplumconfigset622.fromJSON(object.greenplumConfigSet_6_22) + : undefined; + message.greenplumConfigSet6 = + object.greenplumConfigSet_6 !== undefined && + object.greenplumConfigSet_6 !== null + ? GreenplumConfigSet6.fromJSON(object.greenplumConfigSet_6) + : undefined; message.pool = object.pool !== undefined && object.pool !== null ? ConnectionPoolerConfigSet.fromJSON(object.pool) : undefined; + message.backgroundActivities = + object.backgroundActivities !== undefined && + object.backgroundActivities !== null + ? BackgroundActivitiesConfig.fromJSON(object.backgroundActivities) + : undefined; + message.pxfConfig = + object.pxfConfig !== undefined && object.pxfConfig !== null + ? PXFConfigSet.fromJSON(object.pxfConfig) + : undefined; return message; }, @@ -983,10 +1101,30 @@ export const ClusterConfigSet = { (obj.greenplumConfigSet_6_19 = message.greenplumConfigSet619 ? Greenplumconfigset619.toJSON(message.greenplumConfigSet619) : undefined); + message.greenplumConfigSet621 !== undefined && + (obj.greenplumConfigSet_6_21 = message.greenplumConfigSet621 + ? 
Greenplumconfigset621.toJSON(message.greenplumConfigSet621) + : undefined); + message.greenplumConfigSet622 !== undefined && + (obj.greenplumConfigSet_6_22 = message.greenplumConfigSet622 + ? Greenplumconfigset622.toJSON(message.greenplumConfigSet622) + : undefined); + message.greenplumConfigSet6 !== undefined && + (obj.greenplumConfigSet_6 = message.greenplumConfigSet6 + ? GreenplumConfigSet6.toJSON(message.greenplumConfigSet6) + : undefined); message.pool !== undefined && (obj.pool = message.pool ? ConnectionPoolerConfigSet.toJSON(message.pool) : undefined); + message.backgroundActivities !== undefined && + (obj.backgroundActivities = message.backgroundActivities + ? BackgroundActivitiesConfig.toJSON(message.backgroundActivities) + : undefined); + message.pxfConfig !== undefined && + (obj.pxfConfig = message.pxfConfig + ? PXFConfigSet.toJSON(message.pxfConfig) + : undefined); return obj; }, @@ -1004,10 +1142,34 @@ export const ClusterConfigSet = { object.greenplumConfigSet619 !== null ? Greenplumconfigset619.fromPartial(object.greenplumConfigSet619) : undefined; + message.greenplumConfigSet621 = + object.greenplumConfigSet621 !== undefined && + object.greenplumConfigSet621 !== null + ? Greenplumconfigset621.fromPartial(object.greenplumConfigSet621) + : undefined; + message.greenplumConfigSet622 = + object.greenplumConfigSet622 !== undefined && + object.greenplumConfigSet622 !== null + ? Greenplumconfigset622.fromPartial(object.greenplumConfigSet622) + : undefined; + message.greenplumConfigSet6 = + object.greenplumConfigSet6 !== undefined && + object.greenplumConfigSet6 !== null + ? GreenplumConfigSet6.fromPartial(object.greenplumConfigSet6) + : undefined; message.pool = object.pool !== undefined && object.pool !== null ? ConnectionPoolerConfigSet.fromPartial(object.pool) : undefined; + message.backgroundActivities = + object.backgroundActivities !== undefined && + object.backgroundActivities !== null + ? 
BackgroundActivitiesConfig.fromPartial(object.backgroundActivities) + : undefined; + message.pxfConfig = + object.pxfConfig !== undefined && object.pxfConfig !== null + ? PXFConfigSet.fromPartial(object.pxfConfig) + : undefined; return message; }, }; @@ -1539,6 +1701,68 @@ export const RestoreResources = { messageTypeRegistry.set(RestoreResources.$type, RestoreResources); +const baseCloudStorage: object = { + $type: "yandex.cloud.mdb.greenplum.v1.CloudStorage", + enable: false, +}; + +export const CloudStorage = { + $type: "yandex.cloud.mdb.greenplum.v1.CloudStorage" as const, + + encode( + message: CloudStorage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enable === true) { + writer.uint32(8).bool(message.enable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CloudStorage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCloudStorage } as CloudStorage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CloudStorage { + const message = { ...baseCloudStorage } as CloudStorage; + message.enable = + object.enable !== undefined && object.enable !== null + ? Boolean(object.enable) + : false; + return message; + }, + + toJSON(message: CloudStorage): unknown { + const obj: any = {}; + message.enable !== undefined && (obj.enable = message.enable); + return obj; + }, + + fromPartial&lt;I extends Exact&lt;DeepPartial&lt;CloudStorage&gt;, I&gt;&gt;( + object: I + ): CloudStorage { + const message = { ...baseCloudStorage } as CloudStorage; + message.enable = object.enable ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(CloudStorage.$type, CloudStorage); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts index 6d52150d..9ad77b14 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts @@ -19,6 +19,7 @@ import _m0 from "protobufjs/minimal"; import { Cluster_Environment, GreenplumConfig, + CloudStorage, GreenplumRestoreConfig, Cluster, cluster_EnvironmentFromJSON, @@ -27,10 +28,15 @@ import { import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/greenplum/v1/maintenance"; import { ConnectionPoolerConfig, + BackgroundActivitiesConfig, Resources, Greenplumconfig617, Greenplumconfig619, + Greenplumconfig621, + Greenplumconfig622, + GreenplumConfig6, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; +import { PXFConfig } from "../../../../../yandex/cloud/mdb/greenplum/v1/pxf"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -136,6 +142,8 @@ export interface CreateClusterRequest { maintenanceWindow?: MaintenanceWindow; /** Configuration of Greenplum® and Odyssey®. 
*/ configSpec?: ConfigSpec; + /** Cloud storage settings */ + cloudStorage?: CloudStorage; } export interface CreateClusterRequest_LabelsEntry { @@ -148,8 +156,13 @@ export interface ConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec"; greenplumConfig617?: Greenplumconfig617 | undefined; greenplumConfig619?: Greenplumconfig619 | undefined; + greenplumConfig621?: Greenplumconfig621 | undefined; + greenplumConfig622?: Greenplumconfig622 | undefined; + greenplumConfig6?: GreenplumConfig6 | undefined; /** Odyssey® pool settings. */ pool?: ConnectionPoolerConfig; + backgroundActivities?: BackgroundActivitiesConfig; + pxfConfig?: PXFConfig; } export interface CreateClusterMetadata { @@ -195,6 +208,8 @@ export interface UpdateClusterRequest { deletionProtection: boolean; /** Settings of the Greenplum® cluster. */ configSpec?: ConfigSpec; + /** Cloud storage settings */ + cloudStorage?: CloudStorage; } export interface UpdateClusterRequest_LabelsEntry { @@ -431,6 +446,8 @@ export enum ListClusterLogsRequest_ServiceType { GREENPLUM = 1, /** GREENPLUM_POOLER - Greenplum® pooler logs. */ GREENPLUM_POOLER = 2, + /** GREENPLUM_PXF - Greenplum® PXF service logs. */ + GREENPLUM_PXF = 3, UNRECOGNIZED = -1, } @@ -447,6 +464,9 @@ export function listClusterLogsRequest_ServiceTypeFromJSON( case 2: case "GREENPLUM_POOLER": return ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER; + case 3: + case "GREENPLUM_PXF": + return ListClusterLogsRequest_ServiceType.GREENPLUM_PXF; case -1: case "UNRECOGNIZED": default: @@ -464,6 +484,8 @@ export function listClusterLogsRequest_ServiceTypeToJSON( return "GREENPLUM"; case ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER: return "GREENPLUM_POOLER"; + case ListClusterLogsRequest_ServiceType.GREENPLUM_PXF: + return "GREENPLUM_PXF"; default: return "UNKNOWN"; } @@ -550,6 +572,8 @@ export enum StreamClusterLogsRequest_ServiceType { GREENPLUM = 1, /** GREENPLUM_POOLER - Greenplum® pooler logs. 
*/ GREENPLUM_POOLER = 2, + /** GREENPLUM_PXF - Greenplum® PXF service logs. */ + GREENPLUM_PXF = 3, UNRECOGNIZED = -1, } @@ -566,6 +590,9 @@ export function streamClusterLogsRequest_ServiceTypeFromJSON( case 2: case "GREENPLUM_POOLER": return StreamClusterLogsRequest_ServiceType.GREENPLUM_POOLER; + case 3: + case "GREENPLUM_PXF": + return StreamClusterLogsRequest_ServiceType.GREENPLUM_PXF; case -1: case "UNRECOGNIZED": default: @@ -583,6 +610,8 @@ export function streamClusterLogsRequest_ServiceTypeToJSON( return "GREENPLUM"; case StreamClusterLogsRequest_ServiceType.GREENPLUM_POOLER: return "GREENPLUM_POOLER"; + case StreamClusterLogsRequest_ServiceType.GREENPLUM_PXF: + return "GREENPLUM_PXF"; default: return "UNKNOWN"; } @@ -610,6 +639,8 @@ export interface RestoreClusterRequest { * To get the backup ID, use a [ClusterService.ListBackups] request. */ backupId: string; + /** Timestamp of the moment to which the Greenplum cluster should be restored. */ + time?: Date; /** ID of the folder to create the Greenplum® cluster in. */ folderId: string; /** Name of the Greenplum® cluster. The name must be unique within the folder. */ @@ -641,6 +672,10 @@ export interface RestoreClusterRequest { placementGroupId: string; /** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. 
*/ maintenanceWindow?: MaintenanceWindow; + /** Number of segment hosts */ + segmentHostCount: number; + /** Number of segments on each host */ + segmentInHost: number; } export interface RestoreClusterRequest_LabelsEntry { @@ -1004,6 +1039,12 @@ export const CreateClusterRequest = { if (message.configSpec !== undefined) { ConfigSpec.encode(message.configSpec, writer.uint32(162).fork()).ldelim(); } + if (message.cloudStorage !== undefined) { + CloudStorage.encode( + message.cloudStorage, + writer.uint32(170).fork() + ).ldelim(); + } return writer; }, @@ -1092,6 +1133,9 @@ export const CreateClusterRequest = { case 20: message.configSpec = ConfigSpec.decode(reader, reader.uint32()); break; + case 21: + message.cloudStorage = CloudStorage.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1180,6 +1224,10 @@ export const CreateClusterRequest = { object.configSpec !== undefined && object.configSpec !== null ? ConfigSpec.fromJSON(object.configSpec) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? CloudStorage.fromJSON(object.cloudStorage) + : undefined; return message; }, @@ -1239,6 +1287,10 @@ export const CreateClusterRequest = { (obj.configSpec = message.configSpec ? ConfigSpec.toJSON(message.configSpec) : undefined); + message.cloudStorage !== undefined && + (obj.cloudStorage = message.cloudStorage + ? CloudStorage.toJSON(message.cloudStorage) + : undefined); return obj; }, @@ -1288,6 +1340,10 @@ export const CreateClusterRequest = { object.configSpec !== undefined && object.configSpec !== null ? ConfigSpec.fromPartial(object.configSpec) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? 
CloudStorage.fromPartial(object.cloudStorage) + : undefined; return message; }, }; @@ -1403,12 +1459,39 @@ export const ConfigSpec = { writer.uint32(18).fork() ).ldelim(); } + if (message.greenplumConfig621 !== undefined) { + Greenplumconfig621.encode( + message.greenplumConfig621, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.greenplumConfig622 !== undefined) { + Greenplumconfig622.encode( + message.greenplumConfig622, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.greenplumConfig6 !== undefined) { + GreenplumConfig6.encode( + message.greenplumConfig6, + writer.uint32(74).fork() + ).ldelim(); + } if (message.pool !== undefined) { ConnectionPoolerConfig.encode( message.pool, writer.uint32(26).fork() ).ldelim(); } + if (message.backgroundActivities !== undefined) { + BackgroundActivitiesConfig.encode( + message.backgroundActivities, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.pxfConfig !== undefined) { + PXFConfig.encode(message.pxfConfig, writer.uint32(66).fork()).ldelim(); + } return writer; }, @@ -1431,9 +1514,36 @@ export const ConfigSpec = { reader.uint32() ); break; + case 4: + message.greenplumConfig621 = Greenplumconfig621.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.greenplumConfig622 = Greenplumconfig622.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.greenplumConfig6 = GreenplumConfig6.decode( + reader, + reader.uint32() + ); + break; case 3: message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); break; + case 6: + message.backgroundActivities = BackgroundActivitiesConfig.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.pxfConfig = PXFConfig.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1454,10 +1564,34 @@ export const ConfigSpec = { object.greenplumConfig_6_19 !== null ? 
Greenplumconfig619.fromJSON(object.greenplumConfig_6_19) : undefined; + message.greenplumConfig621 = + object.greenplumConfig_6_21 !== undefined && + object.greenplumConfig_6_21 !== null + ? Greenplumconfig621.fromJSON(object.greenplumConfig_6_21) + : undefined; + message.greenplumConfig622 = + object.greenplumConfig_6_22 !== undefined && + object.greenplumConfig_6_22 !== null + ? Greenplumconfig622.fromJSON(object.greenplumConfig_6_22) + : undefined; + message.greenplumConfig6 = + object.greenplumConfig_6 !== undefined && + object.greenplumConfig_6 !== null + ? GreenplumConfig6.fromJSON(object.greenplumConfig_6) + : undefined; message.pool = object.pool !== undefined && object.pool !== null ? ConnectionPoolerConfig.fromJSON(object.pool) : undefined; + message.backgroundActivities = + object.backgroundActivities !== undefined && + object.backgroundActivities !== null + ? BackgroundActivitiesConfig.fromJSON(object.backgroundActivities) + : undefined; + message.pxfConfig = + object.pxfConfig !== undefined && object.pxfConfig !== null + ? PXFConfig.fromJSON(object.pxfConfig) + : undefined; return message; }, @@ -1471,10 +1605,30 @@ export const ConfigSpec = { (obj.greenplumConfig_6_19 = message.greenplumConfig619 ? Greenplumconfig619.toJSON(message.greenplumConfig619) : undefined); + message.greenplumConfig621 !== undefined && + (obj.greenplumConfig_6_21 = message.greenplumConfig621 + ? Greenplumconfig621.toJSON(message.greenplumConfig621) + : undefined); + message.greenplumConfig622 !== undefined && + (obj.greenplumConfig_6_22 = message.greenplumConfig622 + ? Greenplumconfig622.toJSON(message.greenplumConfig622) + : undefined); + message.greenplumConfig6 !== undefined && + (obj.greenplumConfig_6 = message.greenplumConfig6 + ? GreenplumConfig6.toJSON(message.greenplumConfig6) + : undefined); message.pool !== undefined && (obj.pool = message.pool ? 
ConnectionPoolerConfig.toJSON(message.pool) : undefined); + message.backgroundActivities !== undefined && + (obj.backgroundActivities = message.backgroundActivities + ? BackgroundActivitiesConfig.toJSON(message.backgroundActivities) + : undefined); + message.pxfConfig !== undefined && + (obj.pxfConfig = message.pxfConfig + ? PXFConfig.toJSON(message.pxfConfig) + : undefined); return obj; }, @@ -1492,10 +1646,33 @@ export const ConfigSpec = { object.greenplumConfig619 !== null ? Greenplumconfig619.fromPartial(object.greenplumConfig619) : undefined; + message.greenplumConfig621 = + object.greenplumConfig621 !== undefined && + object.greenplumConfig621 !== null + ? Greenplumconfig621.fromPartial(object.greenplumConfig621) + : undefined; + message.greenplumConfig622 = + object.greenplumConfig622 !== undefined && + object.greenplumConfig622 !== null + ? Greenplumconfig622.fromPartial(object.greenplumConfig622) + : undefined; + message.greenplumConfig6 = + object.greenplumConfig6 !== undefined && object.greenplumConfig6 !== null + ? GreenplumConfig6.fromPartial(object.greenplumConfig6) + : undefined; message.pool = object.pool !== undefined && object.pool !== null ? ConnectionPoolerConfig.fromPartial(object.pool) : undefined; + message.backgroundActivities = + object.backgroundActivities !== undefined && + object.backgroundActivities !== null + ? BackgroundActivitiesConfig.fromPartial(object.backgroundActivities) + : undefined; + message.pxfConfig = + object.pxfConfig !== undefined && object.pxfConfig !== null + ? 
PXFConfig.fromPartial(object.pxfConfig) + : undefined; return message; }, }; @@ -1640,6 +1817,12 @@ export const UpdateClusterRequest = { if (message.configSpec !== undefined) { ConfigSpec.encode(message.configSpec, writer.uint32(154).fork()).ldelim(); } + if (message.cloudStorage !== undefined) { + CloudStorage.encode( + message.cloudStorage, + writer.uint32(162).fork() + ).ldelim(); + } return writer; }, @@ -1709,6 +1892,9 @@ export const UpdateClusterRequest = { case 19: message.configSpec = ConfigSpec.decode(reader, reader.uint32()); break; + case 20: + message.cloudStorage = CloudStorage.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1774,6 +1960,10 @@ export const UpdateClusterRequest = { object.configSpec !== undefined && object.configSpec !== null ? ConfigSpec.fromJSON(object.configSpec) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? CloudStorage.fromJSON(object.cloudStorage) + : undefined; return message; }, @@ -1822,6 +2012,10 @@ export const UpdateClusterRequest = { (obj.configSpec = message.configSpec ? ConfigSpec.toJSON(message.configSpec) : undefined); + message.cloudStorage !== undefined && + (obj.cloudStorage = message.cloudStorage + ? CloudStorage.toJSON(message.cloudStorage) + : undefined); return obj; }, @@ -1868,6 +2062,10 @@ export const UpdateClusterRequest = { object.configSpec !== undefined && object.configSpec !== null ? ConfigSpec.fromPartial(object.configSpec) : undefined; + message.cloudStorage = + object.cloudStorage !== undefined && object.cloudStorage !== null + ? 
CloudStorage.fromPartial(object.cloudStorage) + : undefined; return message; }, }; @@ -4036,6 +4234,8 @@ const baseRestoreClusterRequest: object = { deletionProtection: false, hostGroupIds: "", placementGroupId: "", + segmentHostCount: 0, + segmentInHost: 0, }; export const RestoreClusterRequest = { @@ -4048,6 +4248,12 @@ export const RestoreClusterRequest = { if (message.backupId !== "") { writer.uint32(10).string(message.backupId); } + if (message.time !== undefined) { + Timestamp.encode( + toTimestamp(message.time), + writer.uint32(130).fork() + ).ldelim(); + } if (message.folderId !== "") { writer.uint32(18).string(message.folderId); } @@ -4110,6 +4316,12 @@ export const RestoreClusterRequest = { writer.uint32(122).fork() ).ldelim(); } + if (message.segmentHostCount !== 0) { + writer.uint32(136).int64(message.segmentHostCount); + } + if (message.segmentInHost !== 0) { + writer.uint32(144).int64(message.segmentInHost); + } return writer; }, @@ -4129,6 +4341,11 @@ export const RestoreClusterRequest = { case 1: message.backupId = reader.string(); break; + case 16: + message.time = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; case 2: message.folderId = reader.string(); break; @@ -4183,6 +4400,12 @@ export const RestoreClusterRequest = { reader.uint32() ); break; + case 17: + message.segmentHostCount = longToNumber(reader.int64() as Long); + break; + case 18: + message.segmentInHost = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -4197,6 +4420,10 @@ export const RestoreClusterRequest = { object.backupId !== undefined && object.backupId !== null ? String(object.backupId) : ""; + message.time = + object.time !== undefined && object.time !== null + ? fromJsonTimestamp(object.time) + : undefined; message.folderId = object.folderId !== undefined && object.folderId !== null ? String(object.folderId) @@ -4255,12 +4482,21 @@ export const RestoreClusterRequest = { object.maintenanceWindow !== null ? 
MaintenanceWindow.fromJSON(object.maintenanceWindow) : undefined; + message.segmentHostCount = + object.segmentHostCount !== undefined && object.segmentHostCount !== null + ? Number(object.segmentHostCount) + : 0; + message.segmentInHost = + object.segmentInHost !== undefined && object.segmentInHost !== null + ? Number(object.segmentInHost) + : 0; return message; }, toJSON(message: RestoreClusterRequest): unknown { const obj: any = {}; message.backupId !== undefined && (obj.backupId = message.backupId); + message.time !== undefined && (obj.time = message.time.toISOString()); message.folderId !== undefined && (obj.folderId = message.folderId); message.name !== undefined && (obj.name = message.name); message.description !== undefined && @@ -4304,6 +4540,10 @@ export const RestoreClusterRequest = { (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) : undefined); + message.segmentHostCount !== undefined && + (obj.segmentHostCount = Math.round(message.segmentHostCount)); + message.segmentInHost !== undefined && + (obj.segmentInHost = Math.round(message.segmentInHost)); return obj; }, @@ -4312,6 +4552,7 @@ export const RestoreClusterRequest = { ): RestoreClusterRequest { const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; message.backupId = object.backupId ?? ""; + message.time = object.time ?? undefined; message.folderId = object.folderId ?? ""; message.name = object.name ?? ""; message.description = object.description ?? ""; @@ -4346,6 +4587,8 @@ export const RestoreClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromPartial(object.maintenanceWindow) : undefined; + message.segmentHostCount = object.segmentHostCount ?? 0; + message.segmentInHost = object.segmentInHost ?? 
0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts index ee62f1f2..a00b6e81 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts @@ -138,6 +138,32 @@ export function connectionPoolerConfig_PoolModeToJSON( } } +export interface BackgroundActivityStartAt { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivityStartAt"; + hours: number; + minutes: number; +} + +export interface TableSizes { + $type: "yandex.cloud.mdb.greenplum.v1.TableSizes"; + starts: BackgroundActivityStartAt[]; +} + +export interface AnalyzeAndVacuum { + $type: "yandex.cloud.mdb.greenplum.v1.AnalyzeAndVacuum"; + start?: BackgroundActivityStartAt; + /** in seconds 24*60*60-1 = 86399 */ + analyzeTimeout?: number; + /** in seconds 24*60*60-1 = 86399 */ + vacuumTimeout?: number; +} + +export interface BackgroundActivitiesConfig { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivitiesConfig"; + tableSizes?: TableSizes; + analyzeAndVacuum?: AnalyzeAndVacuum; +} + export interface MasterSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig"; /** Computational resources allocated to Greenplum® master subcluster hosts. */ @@ -150,6 +176,67 @@ export interface SegmentSubclusterConfig { resources?: Resources; } +export interface GreenplumConfig6 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6"; + /** Maximum number of inbound connections on master segment */ + maxConnections?: number; + /** + * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. + * https://www.postgresql.org/docs/current/runtime-config-replication.html + */ + maxSlotWalKeepSize?: number; + /** + * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. 
+ * The default value is 0, which means a limit is not enforced. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment + */ + gpWorkfileLimitPerSegment?: number; + /** + * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query + */ + gpWorkfileLimitPerQuery?: number; + /** + * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. + * Spill files are created when executing a query that requires more memory than it is allocated. + * The current query is terminated when the limit is exceeded. + * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query + * Default value is 10000 + */ + gpWorkfileLimitFilesPerQuery?: number; + /** + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html + */ + maxPreparedTransactions?: number; + /** + * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression + */ + gpWorkfileCompression?: boolean; + /** + * Sets the maximum memory limit for a query. Helps avoid out-of-memory errors on a segment host during query processing as a result of setting statement_mem too high. 
+ * Taking into account the configuration of a single segment host, calculate max_statement_mem as follows: + * (seghost_physical_memory) / (average_number_concurrent_queries) + * When changing both max_statement_mem and statement_mem, max_statement_mem must be changed first, or listed first in the postgresql.conf file. + * https://greenplum.docs.pivotal.io/6-19/ref_guide/config_params/guc-list.html#max_statement_mem + * Default value is 2097152000 (2000MB) + */ + maxStatementMem?: number; + /** + * Controls which SQL statements are logged. DDL logs all data definition commands like CREATE, ALTER, and DROP commands. + * MOD logs all DDL statements, plus INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM. + * PREPARE and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_statement + * Default value is ddl + */ + logStatement: LogStatement; + /** https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-config_params-guc-list.html#gp_add_column_inherits_table_setting */ + gpAddColumnInheritsTableSetting?: boolean; +} + export interface Greenplumconfig617 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17"; /** Maximum number of inbound connections on master segment. */ @@ -165,7 +252,7 @@ export interface Greenplumconfig617 { * * The default value is 0 (no limit). * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_segment). */ gpWorkfileLimitPerSegment?: number; /** @@ -173,7 +260,7 @@ export interface Greenplumconfig617 { * * The default value is 0 (no limit). 
* - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_query). */ gpWorkfileLimitPerQuery?: number; /** @@ -189,7 +276,7 @@ export interface Greenplumconfig617 { * * Default value is 10000. * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_files_per_query). */ gpWorkfileLimitFilesPerQuery?: number; /** @@ -201,7 +288,7 @@ export interface Greenplumconfig617 { /** * Whether the spill files are compressed or not. * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_compression). */ gpWorkfileCompression?: boolean; } @@ -221,7 +308,7 @@ export interface Greenplumconfig619 { * * The default value is 0 (no limit). * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_segment). */ gpWorkfileLimitPerSegment?: number; /** @@ -229,7 +316,7 @@ export interface Greenplumconfig619 { * * The default value is 0 (no limit). * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query). 
+ * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_query). */ gpWorkfileLimitPerQuery?: number; /** @@ -245,7 +332,7 @@ export interface Greenplumconfig619 { * * Default value is 10000. * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_files_per_query). */ gpWorkfileLimitFilesPerQuery?: number; /** @@ -257,7 +344,7 @@ export interface Greenplumconfig619 { /** * Whether the spill files are compressed or not. * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_compression). */ gpWorkfileCompression?: boolean; /** @@ -279,15 +366,137 @@ export interface Greenplumconfig619 { * * `PREPARE` and `EXPLAIN ANALYZE` statements are also logged if their contained command belongs to an appropriate type. * - * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_statement). + * More info in [Greenplum® documentation](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#log_statement). + */ + logStatement: LogStatement; +} + +export interface Greenplumconfig621 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_21"; + /** Maximum number of inbound connections on master segment */ + maxConnections?: number; + /** + * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. 
+ * https://www.postgresql.org/docs/current/runtime-config-replication.html + */ + maxSlotWalKeepSize?: number; + /** + * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_segment + */ + gpWorkfileLimitPerSegment?: number; + /** + * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_query + */ + gpWorkfileLimitPerQuery?: number; + /** + * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. + * Spill files are created when executing a query that requires more memory than it is allocated. + * The current query is terminated when the limit is exceeded. + * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_files_per_query + * Default value is 10000 + */ + gpWorkfileLimitFilesPerQuery?: number; + /** + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html + */ + maxPreparedTransactions?: number; + /** + * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. 
+ * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_compression + */ + gpWorkfileCompression?: boolean; + /** + * Sets the maximum memory limit for a query. Helps avoid out-of-memory errors on a segment host during query processing as a result of setting statement_mem too high. + * Taking into account the configuration of a single segment host, calculate max_statement_mem as follows: + * (seghost_physical_memory) / (average_number_concurrent_queries) + * When changing both max_statement_mem and statement_mem, max_statement_mem must be changed first, or listed first in the postgresql.conf file. + * https://greenplum.docs.pivotal.io/6-19/ref_guide/config_params/guc-list.html#max_statement_mem + * Default value is 2097152000 (2000MB) + */ + maxStatementMem?: number; + /** + * Controls which SQL statements are logged. DDL logs all data definition commands like CREATE, ALTER, and DROP commands. + * MOD logs all DDL statements, plus INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM. + * PREPARE and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#log_statement + * Default value is ddl + */ + logStatement: LogStatement; + /** https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-config_params-guc-list.html#gp_add_column_inherits_table_setting */ + gpAddColumnInheritsTableSetting?: boolean; +} + +export interface Greenplumconfig622 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_22"; + /** Maximum number of inbound connections on master segment */ + maxConnections?: number; + /** + * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. 
+ * https://www.postgresql.org/docs/current/runtime-config-replication.html + */ + maxSlotWalKeepSize?: number; + /** + * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_segment + */ + gpWorkfileLimitPerSegment?: number; + /** + * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_per_query + */ + gpWorkfileLimitPerQuery?: number; + /** + * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. + * Spill files are created when executing a query that requires more memory than it is allocated. + * The current query is terminated when the limit is exceeded. + * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_limit_files_per_query + * Default value is 10000 + */ + gpWorkfileLimitFilesPerQuery?: number; + /** + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html + */ + maxPreparedTransactions?: number; + /** + * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. 
+ * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#gp_workfile_compression + */ + gpWorkfileCompression?: boolean; + /** + * Sets the maximum memory limit for a query. Helps avoid out-of-memory errors on a segment host during query processing as a result of setting statement_mem too high. + * Taking into account the configuration of a single segment host, calculate max_statement_mem as follows: + * (seghost_physical_memory) / (average_number_concurrent_queries) + * When changing both max_statement_mem and statement_mem, max_statement_mem must be changed first, or listed first in the postgresql.conf file. + * https://greenplum.docs.pivotal.io/6-19/ref_guide/config_params/guc-list.html#max_statement_mem + * Default value is 2097152000 (2000MB) + */ + maxStatementMem?: number; + /** + * Controls which SQL statements are logged. DDL logs all data definition commands like CREATE, ALTER, and DROP commands. + * MOD logs all DDL statements, plus INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM. + * PREPARE and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. + * https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-config_params-guc-list.html#log_statement + * Default value is ddl */ logStatement: LogStatement; + /** https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-config_params-guc-list.html#gp_add_column_inherits_table_setting */ + gpAddColumnInheritsTableSetting?: boolean; } /** Configuration settings version 6.17 */ export interface Greenplumconfigset617 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17"; - /** Effective settings for a Greenplum® cluster (a combination of settings defined in [GreenplumConfigSet6_17.user_config] and [GreenplumConfigSet6_17.default_config]). */ + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [user_config] and [default_config]). 
*/ effectiveConfig?: Greenplumconfig617; /** User-defined settings for a Greenplum® cluster. */ userConfig?: Greenplumconfig617; @@ -298,7 +507,7 @@ export interface Greenplumconfigset617 { /** Configuration settings version 6.19 */ export interface Greenplumconfigset619 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19"; - /** Effective settings for a Greenplum® cluster (a combination of settings defined in [GreenplumConfigSet6_19.user_config] and [GreenplumConfigSet6_19.default_config]). */ + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [user_config] and [default_config]). */ effectiveConfig?: Greenplumconfig619; /** User-defined settings for a Greenplum® cluster. */ userConfig?: Greenplumconfig619; @@ -306,6 +515,39 @@ export interface Greenplumconfigset619 { defaultConfig?: Greenplumconfig619; } +export interface Greenplumconfigset621 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_21"; + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: Greenplumconfig621; + /** User-defined settings for a Greenplum® cluster. */ + userConfig?: Greenplumconfig621; + /** Default configuration for a Greenplum® cluster. */ + defaultConfig?: Greenplumconfig621; +} + +export interface Greenplumconfigset622 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_22"; + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: Greenplumconfig622; + /** User-defined settings for a Greenplum® cluster. */ + userConfig?: Greenplumconfig622; + /** Default configuration for a Greenplum® cluster. 
*/ + defaultConfig?: Greenplumconfig622; +} + +export interface GreenplumConfigSet6 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6"; + /** + * Effective settings for a Greenplum (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: GreenplumConfig6; + /** User-defined settings for a Greenplum. */ + userConfig?: GreenplumConfig6; + /** Default configuration for a Greenplum. */ + defaultConfig?: GreenplumConfig6; +} + export interface ConnectionPoolerConfigSet { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet"; /** Effective settings for an Odyssey® pooler (a combination of settings defined in [ConnectionPoolerConfigSet.user_config] and [ConnectionPoolerConfigSet.default_config]). */ @@ -510,19 +752,24 @@ export const ConnectionPoolerConfig = { messageTypeRegistry.set(ConnectionPoolerConfig.$type, ConnectionPoolerConfig); -const baseMasterSubclusterConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig", +const baseBackgroundActivityStartAt: object = { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivityStartAt", + hours: 0, + minutes: 0, }; -export const MasterSubclusterConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig" as const, +export const BackgroundActivityStartAt = { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivityStartAt" as const, encode( - message: MasterSubclusterConfig, + message: BackgroundActivityStartAt, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + if (message.hours !== 0) { + writer.uint32(8).int64(message.hours); + } + if (message.minutes !== 0) { + writer.uint32(16).int64(message.minutes); } return writer; }, @@ -530,15 +777,20 @@ export const MasterSubclusterConfig = { decode( input: _m0.Reader | Uint8Array, length?: number - ): MasterSubclusterConfig { + ): 
BackgroundActivityStartAt { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; + const message = { + ...baseBackgroundActivityStartAt, + } as BackgroundActivityStartAt; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.resources = Resources.decode(reader, reader.uint32()); + message.hours = longToNumber(reader.int64() as Long); + break; + case 2: + message.minutes = longToNumber(reader.int64() as Long); break; default: reader.skipType(tag & 7); @@ -548,69 +800,75 @@ export const MasterSubclusterConfig = { return message; }, - fromJSON(object: any): MasterSubclusterConfig { - const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromJSON(object.resources) - : undefined; + fromJSON(object: any): BackgroundActivityStartAt { + const message = { + ...baseBackgroundActivityStartAt, + } as BackgroundActivityStartAt; + message.hours = + object.hours !== undefined && object.hours !== null + ? Number(object.hours) + : 0; + message.minutes = + object.minutes !== undefined && object.minutes !== null + ? Number(object.minutes) + : 0; return message; }, - toJSON(message: MasterSubclusterConfig): unknown { + toJSON(message: BackgroundActivityStartAt): unknown { const obj: any = {}; - message.resources !== undefined && - (obj.resources = message.resources - ? 
Resources.toJSON(message.resources) - : undefined); + message.hours !== undefined && (obj.hours = Math.round(message.hours)); + message.minutes !== undefined && + (obj.minutes = Math.round(message.minutes)); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): MasterSubclusterConfig { - const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromPartial(object.resources) - : undefined; + ): BackgroundActivityStartAt { + const message = { + ...baseBackgroundActivityStartAt, + } as BackgroundActivityStartAt; + message.hours = object.hours ?? 0; + message.minutes = object.minutes ?? 0; return message; }, }; -messageTypeRegistry.set(MasterSubclusterConfig.$type, MasterSubclusterConfig); +messageTypeRegistry.set( + BackgroundActivityStartAt.$type, + BackgroundActivityStartAt +); -const baseSegmentSubclusterConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig", +const baseTableSizes: object = { + $type: "yandex.cloud.mdb.greenplum.v1.TableSizes", }; -export const SegmentSubclusterConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig" as const, +export const TableSizes = { + $type: "yandex.cloud.mdb.greenplum.v1.TableSizes" as const, encode( - message: SegmentSubclusterConfig, + message: TableSizes, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + for (const v of message.starts) { + BackgroundActivityStartAt.encode(v!, writer.uint32(10).fork()).ldelim(); } return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): SegmentSubclusterConfig { + decode(input: _m0.Reader | Uint8Array, length?: number): TableSizes { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { - ...baseSegmentSubclusterConfig, - } as SegmentSubclusterConfig; + const message = { ...baseTableSizes } as TableSizes; + message.starts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.resources = Resources.decode(reader, reader.uint32()); + message.starts.push( + BackgroundActivityStartAt.decode(reader, reader.uint32()) + ); break; default: reader.skipType(tag & 7); @@ -620,164 +878,1726 @@ export const SegmentSubclusterConfig = { return message; }, - fromJSON(object: any): SegmentSubclusterConfig { - const message = { - ...baseSegmentSubclusterConfig, - } as SegmentSubclusterConfig; - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromJSON(object.resources) - : undefined; + fromJSON(object: any): TableSizes { + const message = { ...baseTableSizes } as TableSizes; + message.starts = (object.starts ?? []).map((e: any) => + BackgroundActivityStartAt.fromJSON(e) + ); return message; }, - toJSON(message: SegmentSubclusterConfig): unknown { + toJSON(message: TableSizes): unknown { const obj: any = {}; - message.resources !== undefined && - (obj.resources = message.resources - ? Resources.toJSON(message.resources) - : undefined); + if (message.starts) { + obj.starts = message.starts.map((e) => + e ? BackgroundActivityStartAt.toJSON(e) : undefined + ); + } else { + obj.starts = []; + } return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): SegmentSubclusterConfig { - const message = { - ...baseSegmentSubclusterConfig, - } as SegmentSubclusterConfig; - message.resources = - object.resources !== undefined && object.resources !== null - ? 
Resources.fromPartial(object.resources) - : undefined; + ): TableSizes { + const message = { ...baseTableSizes } as TableSizes; + message.starts = + object.starts?.map((e) => BackgroundActivityStartAt.fromPartial(e)) || []; return message; }, }; -messageTypeRegistry.set(SegmentSubclusterConfig.$type, SegmentSubclusterConfig); +messageTypeRegistry.set(TableSizes.$type, TableSizes); -const baseGreenplumconfig617: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17", +const baseAnalyzeAndVacuum: object = { + $type: "yandex.cloud.mdb.greenplum.v1.AnalyzeAndVacuum", }; -export const Greenplumconfig617 = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17" as const, +export const AnalyzeAndVacuum = { + $type: "yandex.cloud.mdb.greenplum.v1.AnalyzeAndVacuum" as const, encode( - message: Greenplumconfig617, + message: AnalyzeAndVacuum, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.maxConnections !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + if (message.start !== undefined) { + BackgroundActivityStartAt.encode( + message.start, writer.uint32(10).fork() ).ldelim(); } - if (message.maxSlotWalKeepSize !== undefined) { + if (message.analyzeTimeout !== undefined) { Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxSlotWalKeepSize!, - }, + { $type: "google.protobuf.Int64Value", value: message.analyzeTimeout! }, writer.uint32(18).fork() ).ldelim(); } - if (message.gpWorkfileLimitPerSegment !== undefined) { + if (message.vacuumTimeout !== undefined) { Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitPerSegment!, - }, + { $type: "google.protobuf.Int64Value", value: message.vacuumTimeout! 
}, writer.uint32(26).fork() ).ldelim(); } - if (message.gpWorkfileLimitPerQuery !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitPerQuery!, - }, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.gpWorkfileLimitFilesPerQuery !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitFilesPerQuery!, - }, - writer.uint32(42).fork() - ).ldelim(); - } - if (message.maxPreparedTransactions !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxPreparedTransactions!, - }, - writer.uint32(50).fork() - ).ldelim(); - } - if (message.gpWorkfileCompression !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.gpWorkfileCompression!, - }, - writer.uint32(58).fork() - ).ldelim(); - } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig617 { + decode(input: _m0.Reader | Uint8Array, length?: number): AnalyzeAndVacuum { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; + const message = { ...baseAnalyzeAndVacuum } as AnalyzeAndVacuum; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.maxConnections = Int64Value.decode( + message.start = BackgroundActivityStartAt.decode( reader, reader.uint32() - ).value; + ); break; case 2: - message.maxSlotWalKeepSize = Int64Value.decode( + message.analyzeTimeout = Int64Value.decode( reader, reader.uint32() ).value; break; case 3: - message.gpWorkfileLimitPerSegment = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 4: - message.gpWorkfileLimitPerQuery = Int64Value.decode( + message.vacuumTimeout = Int64Value.decode( reader, reader.uint32() ).value; break; - case 5: - message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( - reader, - reader.uint32() - ).value; + default: + reader.skipType(tag & 7); break; - case 6: - message.maxPreparedTransactions = Int64Value.decode( + } + } + return message; + }, + + fromJSON(object: any): AnalyzeAndVacuum { + const message = { ...baseAnalyzeAndVacuum } as AnalyzeAndVacuum; + message.start = + object.start !== undefined && object.start !== null + ? BackgroundActivityStartAt.fromJSON(object.start) + : undefined; + message.analyzeTimeout = + object.analyzeTimeout !== undefined && object.analyzeTimeout !== null + ? Number(object.analyzeTimeout) + : undefined; + message.vacuumTimeout = + object.vacuumTimeout !== undefined && object.vacuumTimeout !== null + ? Number(object.vacuumTimeout) + : undefined; + return message; + }, + + toJSON(message: AnalyzeAndVacuum): unknown { + const obj: any = {}; + message.start !== undefined && + (obj.start = message.start + ? 
BackgroundActivityStartAt.toJSON(message.start) + : undefined); + message.analyzeTimeout !== undefined && + (obj.analyzeTimeout = message.analyzeTimeout); + message.vacuumTimeout !== undefined && + (obj.vacuumTimeout = message.vacuumTimeout); + return obj; + }, + + fromPartial, I>>( + object: I + ): AnalyzeAndVacuum { + const message = { ...baseAnalyzeAndVacuum } as AnalyzeAndVacuum; + message.start = + object.start !== undefined && object.start !== null + ? BackgroundActivityStartAt.fromPartial(object.start) + : undefined; + message.analyzeTimeout = object.analyzeTimeout ?? undefined; + message.vacuumTimeout = object.vacuumTimeout ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(AnalyzeAndVacuum.$type, AnalyzeAndVacuum); + +const baseBackgroundActivitiesConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivitiesConfig", +}; + +export const BackgroundActivitiesConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.BackgroundActivitiesConfig" as const, + + encode( + message: BackgroundActivitiesConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.tableSizes !== undefined) { + TableSizes.encode(message.tableSizes, writer.uint32(10).fork()).ldelim(); + } + if (message.analyzeAndVacuum !== undefined) { + AnalyzeAndVacuum.encode( + message.analyzeAndVacuum, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackgroundActivitiesConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseBackgroundActivitiesConfig, + } as BackgroundActivitiesConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tableSizes = TableSizes.decode(reader, reader.uint32()); + break; + case 2: + message.analyzeAndVacuum = AnalyzeAndVacuum.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackgroundActivitiesConfig { + const message = { + ...baseBackgroundActivitiesConfig, + } as BackgroundActivitiesConfig; + message.tableSizes = + object.tableSizes !== undefined && object.tableSizes !== null + ? TableSizes.fromJSON(object.tableSizes) + : undefined; + message.analyzeAndVacuum = + object.analyzeAndVacuum !== undefined && object.analyzeAndVacuum !== null + ? AnalyzeAndVacuum.fromJSON(object.analyzeAndVacuum) + : undefined; + return message; + }, + + toJSON(message: BackgroundActivitiesConfig): unknown { + const obj: any = {}; + message.tableSizes !== undefined && + (obj.tableSizes = message.tableSizes + ? TableSizes.toJSON(message.tableSizes) + : undefined); + message.analyzeAndVacuum !== undefined && + (obj.analyzeAndVacuum = message.analyzeAndVacuum + ? AnalyzeAndVacuum.toJSON(message.analyzeAndVacuum) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackgroundActivitiesConfig { + const message = { + ...baseBackgroundActivitiesConfig, + } as BackgroundActivitiesConfig; + message.tableSizes = + object.tableSizes !== undefined && object.tableSizes !== null + ? TableSizes.fromPartial(object.tableSizes) + : undefined; + message.analyzeAndVacuum = + object.analyzeAndVacuum !== undefined && object.analyzeAndVacuum !== null + ? 
AnalyzeAndVacuum.fromPartial(object.analyzeAndVacuum) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + BackgroundActivitiesConfig.$type, + BackgroundActivitiesConfig +); + +const baseMasterSubclusterConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig", +}; + +export const MasterSubclusterConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig" as const, + + encode( + message: MasterSubclusterConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MasterSubclusterConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MasterSubclusterConfig { + const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: MasterSubclusterConfig): unknown { + const obj: any = {}; + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): MasterSubclusterConfig { + const message = { ...baseMasterSubclusterConfig } as MasterSubclusterConfig; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MasterSubclusterConfig.$type, MasterSubclusterConfig); + +const baseSegmentSubclusterConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig", +}; + +export const SegmentSubclusterConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig" as const, + + encode( + message: SegmentSubclusterConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SegmentSubclusterConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSegmentSubclusterConfig, + } as SegmentSubclusterConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SegmentSubclusterConfig { + const message = { + ...baseSegmentSubclusterConfig, + } as SegmentSubclusterConfig; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: SegmentSubclusterConfig): unknown { + const obj: any = {}; + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SegmentSubclusterConfig { + const message = { + ...baseSegmentSubclusterConfig, + } as SegmentSubclusterConfig; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(SegmentSubclusterConfig.$type, SegmentSubclusterConfig); + +const baseGreenplumConfig6: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6", + logStatement: 0, +}; + +export const GreenplumConfig6 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6" as const, + + encode( + message: GreenplumConfig6, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerSegment !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerSegment!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerQuery!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitFilesPerQuery!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStatementMem !== undefined) 
{ + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStatementMem!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(72).int32(message.logStatement); + } + if (message.gpAddColumnInheritsTableSetting !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpAddColumnInheritsTableSetting!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GreenplumConfig6 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumConfig6 } as GreenplumConfig6; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.gpWorkfileCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStatementMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logStatement = reader.int32() as any; + break; + case 10: + message.gpAddColumnInheritsTableSetting = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + 
return message; + }, + + fromJSON(object: any): GreenplumConfig6 { + const message = { ...baseGreenplumConfig6 } as GreenplumConfig6; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? Number(object.gpWorkfileLimitPerSegment) + : undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? Boolean(object.gpWorkfileCompression) + : undefined; + message.maxStatementMem = + object.maxStatementMem !== undefined && object.maxStatementMem !== null + ? Number(object.maxStatementMem) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? logStatementFromJSON(object.logStatement) + : 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting !== undefined && + object.gpAddColumnInheritsTableSetting !== null + ? 
Boolean(object.gpAddColumnInheritsTableSetting) + : undefined; + return message; + }, + + toJSON(message: GreenplumConfig6): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.maxStatementMem !== undefined && + (obj.maxStatementMem = message.maxStatementMem); + message.logStatement !== undefined && + (obj.logStatement = logStatementToJSON(message.logStatement)); + message.gpAddColumnInheritsTableSetting !== undefined && + (obj.gpAddColumnInheritsTableSetting = + message.gpAddColumnInheritsTableSetting); + return obj; + }, + + fromPartial, I>>( + object: I + ): GreenplumConfig6 { + const message = { ...baseGreenplumConfig6 } as GreenplumConfig6; + message.maxConnections = object.maxConnections ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? 
undefined; + message.maxStatementMem = object.maxStatementMem ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(GreenplumConfig6.$type, GreenplumConfig6); + +const baseGreenplumconfig617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17", +}; + +export const Greenplumconfig617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17" as const, + + encode( + message: Greenplumconfig617, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerSegment !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerSegment!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerQuery!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitFilesPerQuery!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( + { + $type: 
"google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig617 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.gpWorkfileCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? 
Number(object.gpWorkfileLimitPerSegment) + : undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? Boolean(object.gpWorkfileCompression) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfig617): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; + message.maxConnections = object.maxConnections ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? 
undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfig617.$type, Greenplumconfig617); + +const baseGreenplumconfig619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19", + logStatement: 0, +}; + +export const Greenplumconfig619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19" as const, + + encode( + message: Greenplumconfig619, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! 
}, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerSegment !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerSegment!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerQuery!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitFilesPerQuery!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStatementMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStatementMem!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(72).int32(message.logStatement); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig619 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.gpWorkfileCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStatementMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logStatement = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? Number(object.gpWorkfileLimitPerSegment) + : undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? 
Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? Boolean(object.gpWorkfileCompression) + : undefined; + message.maxStatementMem = + object.maxStatementMem !== undefined && object.maxStatementMem !== null + ? Number(object.maxStatementMem) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? logStatementFromJSON(object.logStatement) + : 0; + return message; + }, + + toJSON(message: Greenplumconfig619): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.maxStatementMem !== undefined && + (obj.maxStatementMem = message.maxStatementMem); + message.logStatement !== undefined && + (obj.logStatement = 
logStatementToJSON(message.logStatement)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; + message.maxConnections = object.maxConnections ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; + message.maxStatementMem = object.maxStatementMem ?? undefined; + message.logStatement = object.logStatement ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfig619.$type, Greenplumconfig619); + +const baseGreenplumconfig621: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_21", + logStatement: 0, +}; + +export const Greenplumconfig621 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_21" as const, + + encode( + message: Greenplumconfig621, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! 
}, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerSegment !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerSegment!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerQuery!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitFilesPerQuery!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStatementMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStatementMem!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(72).int32(message.logStatement); + } + if (message.gpAddColumnInheritsTableSetting !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpAddColumnInheritsTableSetting!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig621 { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumconfig621 } as Greenplumconfig621; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.gpWorkfileCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStatementMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logStatement = reader.int32() as any; + break; + case 10: + message.gpAddColumnInheritsTableSetting = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfig621 { + const message = { ...baseGreenplumconfig621 } as Greenplumconfig621; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? 
Number(object.gpWorkfileLimitPerSegment) + : undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? Boolean(object.gpWorkfileCompression) + : undefined; + message.maxStatementMem = + object.maxStatementMem !== undefined && object.maxStatementMem !== null + ? Number(object.maxStatementMem) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? logStatementFromJSON(object.logStatement) + : 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting !== undefined && + object.gpAddColumnInheritsTableSetting !== null + ? 
Boolean(object.gpAddColumnInheritsTableSetting) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfig621): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.maxStatementMem !== undefined && + (obj.maxStatementMem = message.maxStatementMem); + message.logStatement !== undefined && + (obj.logStatement = logStatementToJSON(message.logStatement)); + message.gpAddColumnInheritsTableSetting !== undefined && + (obj.gpAddColumnInheritsTableSetting = + message.gpAddColumnInheritsTableSetting); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfig621 { + const message = { ...baseGreenplumconfig621 } as Greenplumconfig621; + message.maxConnections = object.maxConnections ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? 
undefined; + message.maxStatementMem = object.maxStatementMem ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfig621.$type, Greenplumconfig621); + +const baseGreenplumconfig622: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_22", + logStatement: 0, +}; + +export const Greenplumconfig622 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_22" as const, + + encode( + message: Greenplumconfig622, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerSegment !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerSegment!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitPerQuery!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.gpWorkfileLimitFilesPerQuery!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( + { + 
$type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStatementMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStatementMem!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(72).int32(message.logStatement); + } + if (message.gpAddColumnInheritsTableSetting !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.gpAddColumnInheritsTableSetting!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig622 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumconfig622 } as Greenplumconfig622; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.gpWorkfileCompression = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStatementMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logStatement = reader.int32() as any; + break; + 
case 10: + message.gpAddColumnInheritsTableSetting = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfig622 { + const message = { ...baseGreenplumconfig622 } as Greenplumconfig622; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? Number(object.gpWorkfileLimitPerSegment) + : undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? Boolean(object.gpWorkfileCompression) + : undefined; + message.maxStatementMem = + object.maxStatementMem !== undefined && object.maxStatementMem !== null + ? Number(object.maxStatementMem) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? 
logStatementFromJSON(object.logStatement) + : 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting !== undefined && + object.gpAddColumnInheritsTableSetting !== null + ? Boolean(object.gpAddColumnInheritsTableSetting) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfig622): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.maxStatementMem !== undefined && + (obj.maxStatementMem = message.maxStatementMem); + message.logStatement !== undefined && + (obj.logStatement = logStatementToJSON(message.logStatement)); + message.gpAddColumnInheritsTableSetting !== undefined && + (obj.gpAddColumnInheritsTableSetting = + message.gpAddColumnInheritsTableSetting); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfig622 { + const message = { ...baseGreenplumconfig622 } as Greenplumconfig622; + message.maxConnections = object.maxConnections ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? 
undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; + message.maxStatementMem = object.maxStatementMem ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.gpAddColumnInheritsTableSetting = + object.gpAddColumnInheritsTableSetting ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfig622.$type, Greenplumconfig622); + +const baseGreenplumconfigset617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17", +}; + +export const Greenplumconfigset617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17" as const, + + encode( + message: Greenplumconfigset617, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Greenplumconfig617.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Greenplumconfig617.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Greenplumconfig617.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Greenplumconfigset617 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Greenplumconfig617.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Greenplumconfig617.decode( reader, reader.uint32() - ).value; + ); break; - case 7: - message.gpWorkfileCompression = BoolValue.decode( + case 3: + message.defaultConfig = Greenplumconfig617.decode( reader, reader.uint32() - ).value; + ); break; default: reader.skipType(tag & 7); @@ -787,229 +2607,245 @@ export const Greenplumconfig617 = { return message; }, - fromJSON(object: any): Greenplumconfig617 { - const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; - message.maxConnections = - object.maxConnections !== undefined && object.maxConnections !== null - ? Number(object.maxConnections) - : undefined; - message.maxSlotWalKeepSize = - object.maxSlotWalKeepSize !== undefined && - object.maxSlotWalKeepSize !== null - ? Number(object.maxSlotWalKeepSize) - : undefined; - message.gpWorkfileLimitPerSegment = - object.gpWorkfileLimitPerSegment !== undefined && - object.gpWorkfileLimitPerSegment !== null - ? Number(object.gpWorkfileLimitPerSegment) - : undefined; - message.gpWorkfileLimitPerQuery = - object.gpWorkfileLimitPerQuery !== undefined && - object.gpWorkfileLimitPerQuery !== null - ? Number(object.gpWorkfileLimitPerQuery) - : undefined; - message.gpWorkfileLimitFilesPerQuery = - object.gpWorkfileLimitFilesPerQuery !== undefined && - object.gpWorkfileLimitFilesPerQuery !== null - ? Number(object.gpWorkfileLimitFilesPerQuery) + fromJSON(object: any): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
Greenplumconfig617.fromJSON(object.effectiveConfig) : undefined; - message.maxPreparedTransactions = - object.maxPreparedTransactions !== undefined && - object.maxPreparedTransactions !== null - ? Number(object.maxPreparedTransactions) + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig617.fromJSON(object.userConfig) : undefined; - message.gpWorkfileCompression = - object.gpWorkfileCompression !== undefined && - object.gpWorkfileCompression !== null - ? Boolean(object.gpWorkfileCompression) + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig617.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: Greenplumconfig617): unknown { + toJSON(message: Greenplumconfigset617): unknown { const obj: any = {}; - message.maxConnections !== undefined && - (obj.maxConnections = message.maxConnections); - message.maxSlotWalKeepSize !== undefined && - (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); - message.gpWorkfileLimitPerSegment !== undefined && - (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); - message.gpWorkfileLimitPerQuery !== undefined && - (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); - message.gpWorkfileLimitFilesPerQuery !== undefined && - (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); - message.maxPreparedTransactions !== undefined && - (obj.maxPreparedTransactions = message.maxPreparedTransactions); - message.gpWorkfileCompression !== undefined && - (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Greenplumconfig617.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? 
Greenplumconfig617.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Greenplumconfig617.toJSON(message.defaultConfig) + : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Greenplumconfig617 { - const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; - message.maxConnections = object.maxConnections ?? undefined; - message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; - message.gpWorkfileLimitPerSegment = - object.gpWorkfileLimitPerSegment ?? undefined; - message.gpWorkfileLimitPerQuery = - object.gpWorkfileLimitPerQuery ?? undefined; - message.gpWorkfileLimitFilesPerQuery = - object.gpWorkfileLimitFilesPerQuery ?? undefined; - message.maxPreparedTransactions = - object.maxPreparedTransactions ?? undefined; - message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; + ): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig617.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig617.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Greenplumconfig617.fromPartial(object.defaultConfig) + : undefined; return message; }, }; -messageTypeRegistry.set(Greenplumconfig617.$type, Greenplumconfig617); +messageTypeRegistry.set(Greenplumconfigset617.$type, Greenplumconfigset617); -const baseGreenplumconfig619: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19", - logStatement: 0, +const baseGreenplumconfigset619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19", }; -export const Greenplumconfig619 = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19" as const, +export const Greenplumconfigset619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19" as const, encode( - message: Greenplumconfig619, + message: Greenplumconfigset619, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.maxConnections !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + if (message.effectiveConfig !== undefined) { + Greenplumconfig619.encode( + message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } - if (message.maxSlotWalKeepSize !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxSlotWalKeepSize!, - }, + if (message.userConfig !== undefined) { + Greenplumconfig619.encode( + message.userConfig, writer.uint32(18).fork() ).ldelim(); } - if (message.gpWorkfileLimitPerSegment !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitPerSegment!, - }, + if (message.defaultConfig !== undefined) { + Greenplumconfig619.encode( + message.defaultConfig, writer.uint32(26).fork() ).ldelim(); } - if (message.gpWorkfileLimitPerQuery !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitPerQuery!, - }, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.gpWorkfileLimitFilesPerQuery !== undefined) { - 
Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.gpWorkfileLimitFilesPerQuery!, - }, - writer.uint32(42).fork() - ).ldelim(); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Greenplumconfigset619 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } } - if (message.maxPreparedTransactions !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxPreparedTransactions!, - }, - writer.uint32(50).fork() + return message; + }, + + fromJSON(object: any): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig619.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfigset619): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? 
Greenplumconfig619.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Greenplumconfig619.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Greenplumconfig619.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig619.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfigset619.$type, Greenplumconfigset619); + +const baseGreenplumconfigset621: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_21", +}; + +export const Greenplumconfigset621 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_21" as const, + + encode( + message: Greenplumconfigset621, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Greenplumconfig621.encode( + message.effectiveConfig, + writer.uint32(10).fork() ).ldelim(); } - if (message.gpWorkfileCompression !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.gpWorkfileCompression!, - }, - writer.uint32(58).fork() + if (message.userConfig !== undefined) { + Greenplumconfig621.encode( + message.userConfig, + writer.uint32(18).fork() ).ldelim(); } - if (message.maxStatementMem !== undefined) { - 
Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxStatementMem!, - }, - writer.uint32(66).fork() + if (message.defaultConfig !== undefined) { + Greenplumconfig621.encode( + message.defaultConfig, + writer.uint32(26).fork() ).ldelim(); } - if (message.logStatement !== 0) { - writer.uint32(72).int32(message.logStatement); - } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig619 { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Greenplumconfigset621 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; + const message = { ...baseGreenplumconfigset621 } as Greenplumconfigset621; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.maxConnections = Int64Value.decode( + message.effectiveConfig = Greenplumconfig621.decode( reader, reader.uint32() - ).value; + ); break; case 2: - message.maxSlotWalKeepSize = Int64Value.decode( + message.userConfig = Greenplumconfig621.decode( reader, reader.uint32() - ).value; + ); break; case 3: - message.gpWorkfileLimitPerSegment = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 4: - message.gpWorkfileLimitPerQuery = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 5: - message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 6: - message.maxPreparedTransactions = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 7: - message.gpWorkfileCompression = BoolValue.decode( + message.defaultConfig = Greenplumconfig621.decode( reader, reader.uint32() - ).value; - break; - case 8: - message.maxStatementMem = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 9: - message.logStatement = reader.int32() as 
any; + ); break; default: reader.skipType(tag & 7); @@ -1019,124 +2855,87 @@ export const Greenplumconfig619 = { return message; }, - fromJSON(object: any): Greenplumconfig619 { - const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; - message.maxConnections = - object.maxConnections !== undefined && object.maxConnections !== null - ? Number(object.maxConnections) - : undefined; - message.maxSlotWalKeepSize = - object.maxSlotWalKeepSize !== undefined && - object.maxSlotWalKeepSize !== null - ? Number(object.maxSlotWalKeepSize) - : undefined; - message.gpWorkfileLimitPerSegment = - object.gpWorkfileLimitPerSegment !== undefined && - object.gpWorkfileLimitPerSegment !== null - ? Number(object.gpWorkfileLimitPerSegment) - : undefined; - message.gpWorkfileLimitPerQuery = - object.gpWorkfileLimitPerQuery !== undefined && - object.gpWorkfileLimitPerQuery !== null - ? Number(object.gpWorkfileLimitPerQuery) - : undefined; - message.gpWorkfileLimitFilesPerQuery = - object.gpWorkfileLimitFilesPerQuery !== undefined && - object.gpWorkfileLimitFilesPerQuery !== null - ? Number(object.gpWorkfileLimitFilesPerQuery) - : undefined; - message.maxPreparedTransactions = - object.maxPreparedTransactions !== undefined && - object.maxPreparedTransactions !== null - ? Number(object.maxPreparedTransactions) + fromJSON(object: any): Greenplumconfigset621 { + const message = { ...baseGreenplumconfigset621 } as Greenplumconfigset621; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig621.fromJSON(object.effectiveConfig) : undefined; - message.gpWorkfileCompression = - object.gpWorkfileCompression !== undefined && - object.gpWorkfileCompression !== null - ? Boolean(object.gpWorkfileCompression) + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
Greenplumconfig621.fromJSON(object.userConfig) : undefined; - message.maxStatementMem = - object.maxStatementMem !== undefined && object.maxStatementMem !== null - ? Number(object.maxStatementMem) + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig621.fromJSON(object.defaultConfig) : undefined; - message.logStatement = - object.logStatement !== undefined && object.logStatement !== null - ? logStatementFromJSON(object.logStatement) - : 0; return message; }, - toJSON(message: Greenplumconfig619): unknown { + toJSON(message: Greenplumconfigset621): unknown { const obj: any = {}; - message.maxConnections !== undefined && - (obj.maxConnections = message.maxConnections); - message.maxSlotWalKeepSize !== undefined && - (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); - message.gpWorkfileLimitPerSegment !== undefined && - (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); - message.gpWorkfileLimitPerQuery !== undefined && - (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); - message.gpWorkfileLimitFilesPerQuery !== undefined && - (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); - message.maxPreparedTransactions !== undefined && - (obj.maxPreparedTransactions = message.maxPreparedTransactions); - message.gpWorkfileCompression !== undefined && - (obj.gpWorkfileCompression = message.gpWorkfileCompression); - message.maxStatementMem !== undefined && - (obj.maxStatementMem = message.maxStatementMem); - message.logStatement !== undefined && - (obj.logStatement = logStatementToJSON(message.logStatement)); + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Greenplumconfig621.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? 
Greenplumconfig621.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Greenplumconfig621.toJSON(message.defaultConfig) + : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Greenplumconfig619 { - const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; - message.maxConnections = object.maxConnections ?? undefined; - message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; - message.gpWorkfileLimitPerSegment = - object.gpWorkfileLimitPerSegment ?? undefined; - message.gpWorkfileLimitPerQuery = - object.gpWorkfileLimitPerQuery ?? undefined; - message.gpWorkfileLimitFilesPerQuery = - object.gpWorkfileLimitFilesPerQuery ?? undefined; - message.maxPreparedTransactions = - object.maxPreparedTransactions ?? undefined; - message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; - message.maxStatementMem = object.maxStatementMem ?? undefined; - message.logStatement = object.logStatement ?? 0; + ): Greenplumconfigset621 { + const message = { ...baseGreenplumconfigset621 } as Greenplumconfigset621; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig621.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig621.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Greenplumconfig621.fromPartial(object.defaultConfig) + : undefined; return message; }, }; -messageTypeRegistry.set(Greenplumconfig619.$type, Greenplumconfig619); +messageTypeRegistry.set(Greenplumconfigset621.$type, Greenplumconfigset621); -const baseGreenplumconfigset617: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17", +const baseGreenplumconfigset622: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_22", }; -export const Greenplumconfigset617 = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17" as const, +export const Greenplumconfigset622 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_22" as const, encode( - message: Greenplumconfigset617, + message: Greenplumconfigset622, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - Greenplumconfig617.encode( + Greenplumconfig622.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - Greenplumconfig617.encode( + Greenplumconfig622.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - Greenplumconfig617.encode( + Greenplumconfig622.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1147,27 +2946,27 @@ export const Greenplumconfigset617 = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Greenplumconfigset617 { + ): Greenplumconfigset622 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + const message = { ...baseGreenplumconfigset622 } as Greenplumconfigset622; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = Greenplumconfig617.decode( + message.effectiveConfig = Greenplumconfig622.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = Greenplumconfig617.decode( + message.userConfig = Greenplumconfig622.decode( reader, reader.uint32() ); break; case 3: - message.defaultConfig = Greenplumconfig617.decode( + message.defaultConfig = Greenplumconfig622.decode( reader, reader.uint32() ); @@ -1180,87 +2979,87 @@ export const Greenplumconfigset617 = { return message; }, - fromJSON(object: any): Greenplumconfigset617 { - const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + fromJSON(object: any): Greenplumconfigset622 { + const message = { ...baseGreenplumconfigset622 } as Greenplumconfigset622; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? Greenplumconfig617.fromJSON(object.effectiveConfig) + ? Greenplumconfig622.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? Greenplumconfig617.fromJSON(object.userConfig) + ? Greenplumconfig622.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? Greenplumconfig617.fromJSON(object.defaultConfig) + ? Greenplumconfig622.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: Greenplumconfigset617): unknown { + toJSON(message: Greenplumconfigset622): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? Greenplumconfig617.toJSON(message.effectiveConfig) + ? 
Greenplumconfig622.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? Greenplumconfig617.toJSON(message.userConfig) + ? Greenplumconfig622.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? Greenplumconfig617.toJSON(message.defaultConfig) + ? Greenplumconfig622.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Greenplumconfigset617 { - const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; + ): Greenplumconfigset622 { + const message = { ...baseGreenplumconfigset622 } as Greenplumconfigset622; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? Greenplumconfig617.fromPartial(object.effectiveConfig) + ? Greenplumconfig622.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? Greenplumconfig617.fromPartial(object.userConfig) + ? Greenplumconfig622.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? Greenplumconfig617.fromPartial(object.defaultConfig) + ? 
Greenplumconfig622.fromPartial(object.defaultConfig) : undefined; return message; }, }; -messageTypeRegistry.set(Greenplumconfigset617.$type, Greenplumconfigset617); +messageTypeRegistry.set(Greenplumconfigset622.$type, Greenplumconfigset622); -const baseGreenplumconfigset619: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19", +const baseGreenplumConfigSet6: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6", }; -export const Greenplumconfigset619 = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19" as const, +export const GreenplumConfigSet6 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6" as const, encode( - message: Greenplumconfigset619, + message: GreenplumConfigSet6, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - Greenplumconfig619.encode( + GreenplumConfig6.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - Greenplumconfig619.encode( + GreenplumConfig6.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - Greenplumconfig619.encode( + GreenplumConfig6.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1268,30 +3067,24 @@ export const Greenplumconfigset619 = { return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): Greenplumconfigset619 { + decode(input: _m0.Reader | Uint8Array, length?: number): GreenplumConfigSet6 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + const message = { ...baseGreenplumConfigSet6 } as GreenplumConfigSet6; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = Greenplumconfig619.decode( + message.effectiveConfig = GreenplumConfig6.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = Greenplumconfig619.decode( - reader, - reader.uint32() - ); + message.userConfig = GreenplumConfig6.decode(reader, reader.uint32()); break; case 3: - message.defaultConfig = Greenplumconfig619.decode( + message.defaultConfig = GreenplumConfig6.decode( reader, reader.uint32() ); @@ -1304,61 +3097,61 @@ export const Greenplumconfigset619 = { return message; }, - fromJSON(object: any): Greenplumconfigset619 { - const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + fromJSON(object: any): GreenplumConfigSet6 { + const message = { ...baseGreenplumConfigSet6 } as GreenplumConfigSet6; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? Greenplumconfig619.fromJSON(object.effectiveConfig) + ? GreenplumConfig6.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? Greenplumconfig619.fromJSON(object.userConfig) + ? GreenplumConfig6.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? Greenplumconfig619.fromJSON(object.defaultConfig) + ? GreenplumConfig6.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: Greenplumconfigset619): unknown { + toJSON(message: GreenplumConfigSet6): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? Greenplumconfig619.toJSON(message.effectiveConfig) + ? 
GreenplumConfig6.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? Greenplumconfig619.toJSON(message.userConfig) + ? GreenplumConfig6.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? Greenplumconfig619.toJSON(message.defaultConfig) + ? GreenplumConfig6.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Greenplumconfigset619 { - const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + ): GreenplumConfigSet6 { + const message = { ...baseGreenplumConfigSet6 } as GreenplumConfigSet6; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? Greenplumconfig619.fromPartial(object.effectiveConfig) + ? GreenplumConfig6.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? Greenplumconfig619.fromPartial(object.userConfig) + ? GreenplumConfig6.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? Greenplumconfig619.fromPartial(object.defaultConfig) + ? 
GreenplumConfig6.fromPartial(object.defaultConfig) : undefined; return message; }, }; -messageTypeRegistry.set(Greenplumconfigset619.$type, Greenplumconfigset619); +messageTypeRegistry.set(GreenplumConfigSet6.$type, GreenplumConfigSet6); const baseConnectionPoolerConfigSet: object = { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet", diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/pxf.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/pxf.ts new file mode 100644 index 00000000..9e21d4e9 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/pxf.ts @@ -0,0 +1,394 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface PXFConfig { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfig"; + /** Connection */ + connectionTimeout?: number; + uploadTimeout?: number; + /** Thread pool */ + maxThreads?: number; + poolAllowCoreThreadTimeout?: boolean; + poolCoreSize?: number; + poolQueueCapacity?: number; + poolMaxSize?: number; + /** JVM */ + xmx?: number; + xms?: number; +} + +export interface PXFConfigSet { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfigSet"; + effectiveConfig?: PXFConfig; + /** User-defined settings */ + userConfig?: PXFConfig; + /** Default configuration */ + defaultConfig?: PXFConfig; +} + +const basePXFConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfig", +}; + +export const PXFConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfig" as const, + + encode( + message: PXFConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.connectionTimeout!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + 
if (message.uploadTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.uploadTimeout! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.maxThreads !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxThreads! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.poolAllowCoreThreadTimeout !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.poolAllowCoreThreadTimeout!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.poolCoreSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.poolCoreSize! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.poolQueueCapacity !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.poolQueueCapacity!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.poolMaxSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.poolMaxSize! }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.xmx !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.xmx! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.xms !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.xms! }, + writer.uint32(74).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PXFConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePXFConfig } as PXFConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connectionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.uploadTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.maxThreads = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.poolAllowCoreThreadTimeout = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.poolCoreSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.poolQueueCapacity = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.poolMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.xmx = Int64Value.decode(reader, reader.uint32()).value; + break; + case 9: + message.xms = Int64Value.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PXFConfig { + const message = { ...basePXFConfig } as PXFConfig; + message.connectionTimeout = + object.connectionTimeout !== undefined && + object.connectionTimeout !== null + ? Number(object.connectionTimeout) + : undefined; + message.uploadTimeout = + object.uploadTimeout !== undefined && object.uploadTimeout !== null + ? Number(object.uploadTimeout) + : undefined; + message.maxThreads = + object.maxThreads !== undefined && object.maxThreads !== null + ? Number(object.maxThreads) + : undefined; + message.poolAllowCoreThreadTimeout = + object.poolAllowCoreThreadTimeout !== undefined && + object.poolAllowCoreThreadTimeout !== null + ? Boolean(object.poolAllowCoreThreadTimeout) + : undefined; + message.poolCoreSize = + object.poolCoreSize !== undefined && object.poolCoreSize !== null + ? 
Number(object.poolCoreSize) + : undefined; + message.poolQueueCapacity = + object.poolQueueCapacity !== undefined && + object.poolQueueCapacity !== null + ? Number(object.poolQueueCapacity) + : undefined; + message.poolMaxSize = + object.poolMaxSize !== undefined && object.poolMaxSize !== null + ? Number(object.poolMaxSize) + : undefined; + message.xmx = + object.xmx !== undefined && object.xmx !== null + ? Number(object.xmx) + : undefined; + message.xms = + object.xms !== undefined && object.xms !== null + ? Number(object.xms) + : undefined; + return message; + }, + + toJSON(message: PXFConfig): unknown { + const obj: any = {}; + message.connectionTimeout !== undefined && + (obj.connectionTimeout = message.connectionTimeout); + message.uploadTimeout !== undefined && + (obj.uploadTimeout = message.uploadTimeout); + message.maxThreads !== undefined && (obj.maxThreads = message.maxThreads); + message.poolAllowCoreThreadTimeout !== undefined && + (obj.poolAllowCoreThreadTimeout = message.poolAllowCoreThreadTimeout); + message.poolCoreSize !== undefined && + (obj.poolCoreSize = message.poolCoreSize); + message.poolQueueCapacity !== undefined && + (obj.poolQueueCapacity = message.poolQueueCapacity); + message.poolMaxSize !== undefined && + (obj.poolMaxSize = message.poolMaxSize); + message.xmx !== undefined && (obj.xmx = message.xmx); + message.xms !== undefined && (obj.xms = message.xms); + return obj; + }, + + fromPartial, I>>( + object: I + ): PXFConfig { + const message = { ...basePXFConfig } as PXFConfig; + message.connectionTimeout = object.connectionTimeout ?? undefined; + message.uploadTimeout = object.uploadTimeout ?? undefined; + message.maxThreads = object.maxThreads ?? undefined; + message.poolAllowCoreThreadTimeout = + object.poolAllowCoreThreadTimeout ?? undefined; + message.poolCoreSize = object.poolCoreSize ?? undefined; + message.poolQueueCapacity = object.poolQueueCapacity ?? undefined; + message.poolMaxSize = object.poolMaxSize ?? 
undefined; + message.xmx = object.xmx ?? undefined; + message.xms = object.xms ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(PXFConfig.$type, PXFConfig); + +const basePXFConfigSet: object = { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfigSet", +}; + +export const PXFConfigSet = { + $type: "yandex.cloud.mdb.greenplum.v1.PXFConfigSet" as const, + + encode( + message: PXFConfigSet, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + PXFConfig.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + PXFConfig.encode(message.userConfig, writer.uint32(18).fork()).ldelim(); + } + if (message.defaultConfig !== undefined) { + PXFConfig.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PXFConfigSet { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePXFConfigSet } as PXFConfigSet; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = PXFConfig.decode(reader, reader.uint32()); + break; + case 2: + message.userConfig = PXFConfig.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = PXFConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PXFConfigSet { + const message = { ...basePXFConfigSet } as PXFConfigSet; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PXFConfig.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
PXFConfig.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PXFConfig.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: PXFConfigSet): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? PXFConfig.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? PXFConfig.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? PXFConfig.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): PXFConfigSet { + const message = { ...basePXFConfigSet } as PXFConfigSet; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PXFConfig.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PXFConfig.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PXFConfig.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(PXFConfigSet.$type, PXFConfigSet); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/index.ts b/src/generated/yandex/cloud/mdb/index.ts index 5a4cab3b..ab2c2123 100644 --- a/src/generated/yandex/cloud/mdb/index.ts +++ b/src/generated/yandex/cloud/mdb/index.ts @@ -35,6 +35,7 @@ export * as greenplum_cluster_service from './greenplum/v1/cluster_service' export * as greenplum_config from './greenplum/v1/config' export * as greenplum_host from './greenplum/v1/host' export * as greenplum_maintenance from './greenplum/v1/maintenance' +export * as greenplum_pxf from './greenplum/v1/pxf' export * as greenplum_resource_preset from './greenplum/v1/resource_preset' export * as greenplum_resource_preset_service from './greenplum/v1/resource_preset_service' export * as kafka_cluster from './kafka/v1/cluster' @@ -71,6 +72,14 @@ export * as mysql_resource_preset from './mysql/v1/resource_preset' export * as mysql_resource_preset_service from './mysql/v1/resource_preset_service' export * as mysql_user from './mysql/v1/user' export * as mysql_user_service from './mysql/v1/user_service' +export * as opensearch_auth from './opensearch/v1/auth' +export * as opensearch_backup from './opensearch/v1/backup' +export * as opensearch_backup_service from './opensearch/v1/backup_service' +export * as opensearch_cluster from './opensearch/v1/cluster' +export * as opensearch_cluster_service from './opensearch/v1/cluster_service' +export * as opensearch_maintenance from './opensearch/v1/maintenance' +export * as opensearch_resource_preset from './opensearch/v1/resource_preset' +export * as opensearch_resource_preset_service from './opensearch/v1/resource_preset_service' export * as postgresql_backup from './postgresql/v1/backup' export * as postgresql_backup_service from './postgresql/v1/backup_service' export * as postgresql_cluster from './postgresql/v1/cluster' @@ 
-78,6 +87,8 @@ export * as postgresql_cluster_service from './postgresql/v1/cluster_service' export * as postgresql_database from './postgresql/v1/database' export * as postgresql_database_service from './postgresql/v1/database_service' export * as postgresql_maintenance from './postgresql/v1/maintenance' +export * as postgresql_perf_diag from './postgresql/v1/perf_diag' +export * as postgresql_perf_diag_service from './postgresql/v1/perf_diag_service' export * as postgresql_resource_preset from './postgresql/v1/resource_preset' export * as postgresql_resource_preset_service from './postgresql/v1/resource_preset_service' export * as postgresql_user from './postgresql/v1/user' @@ -112,6 +123,7 @@ export * as mongodb_mongodb6_0 from './mongodb/v1/config/mongodb6_0' export * as mongodb_mongodb6_0_enterprise from './mongodb/v1/config/mongodb6_0_enterprise' export * as mysql_mysql5_7 from './mysql/v1/config/mysql5_7' export * as mysql_mysql8_0 from './mysql/v1/config/mysql8_0' +export * as opensearch from './opensearch/v1/config/opensearch' export * as postgresql_host10 from './postgresql/v1/config/host10' export * as postgresql_host10_1c from './postgresql/v1/config/host10_1c' export * as postgresql_host11 from './postgresql/v1/config/host11' @@ -122,6 +134,10 @@ export * as postgresql_host13 from './postgresql/v1/config/host13' export * as postgresql_host13_1c from './postgresql/v1/config/host13_1c' export * as postgresql_host14 from './postgresql/v1/config/host14' export * as postgresql_host14_1c from './postgresql/v1/config/host14_1c' +export * as postgresql_host15 from './postgresql/v1/config/host15' +export * as postgresql_host15_1c from './postgresql/v1/config/host15_1c' +export * as postgresql_host16 from './postgresql/v1/config/host16' +export * as postgresql_host16_1c from './postgresql/v1/config/host16_1c' export * as postgresql_host9_6 from './postgresql/v1/config/host9_6' export * as postgresql_postgresql10 from './postgresql/v1/config/postgresql10' export 
* as postgresql_postgresql10_1c from './postgresql/v1/config/postgresql10_1c' @@ -133,7 +149,12 @@ export * as postgresql_postgresql13 from './postgresql/v1/config/postgresql13' export * as postgresql_postgresql13_1c from './postgresql/v1/config/postgresql13_1c' export * as postgresql_postgresql14 from './postgresql/v1/config/postgresql14' export * as postgresql_postgresql14_1c from './postgresql/v1/config/postgresql14_1c' +export * as postgresql_postgresql15 from './postgresql/v1/config/postgresql15' +export * as postgresql_postgresql15_1c from './postgresql/v1/config/postgresql15_1c' +export * as postgresql_postgresql16 from './postgresql/v1/config/postgresql16' +export * as postgresql_postgresql16_1c from './postgresql/v1/config/postgresql16_1c' export * as postgresql_postgresql9_6 from './postgresql/v1/config/postgresql9_6' +export * as redis from './redis/v1/config/redis' export * as redis_redis5_0 from './redis/v1/config/redis5_0' export * as redis_redis6_0 from './redis/v1/config/redis6_0' export * as redis_redis6_2 from './redis/v1/config/redis6_2' diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts index 71790be2..22abcd17 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts @@ -8,8 +8,11 @@ import { } from "../../../../../yandex/cloud/mdb/kafka/v1/maintenance"; import { CompressionType, + SaslMechanism, compressionTypeFromJSON, + saslMechanismFromJSON, compressionTypeToJSON, + saslMechanismToJSON, } from "../../../../../yandex/cloud/mdb/kafka/v1/common"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; @@ -271,14 +274,14 @@ export interface ConfigSpec { schemaRegistry: boolean; /** Access policy for external services. */ access?: Access; + /** Configuration of REST API. 
*/ + restApiConfig?: ConfigSpec_RestAPIConfig; } export interface ConfigSpec_Kafka { $type: "yandex.cloud.mdb.kafka.v1.ConfigSpec.Kafka"; /** Resources allocated to Kafka brokers. */ resources?: Resources; - kafkaConfig21?: Kafkaconfig21 | undefined; - kafkaConfig26?: Kafkaconfig26 | undefined; kafkaConfig28?: Kafkaconfig28 | undefined; kafkaConfig3?: KafkaConfig3 | undefined; } @@ -289,6 +292,12 @@ export interface ConfigSpec_Zookeeper { resources?: Resources; } +export interface ConfigSpec_RestAPIConfig { + $type: "yandex.cloud.mdb.kafka.v1.ConfigSpec.RestAPIConfig"; + /** Is REST API enabled for this cluster. */ + enabled: boolean; +} + export interface Resources { $type: "yandex.cloud.mdb.kafka.v1.Resources"; /** @@ -302,162 +311,6 @@ export interface Resources { diskTypeId: string; } -/** Kafka version 2.1 broker configuration. */ -export interface Kafkaconfig21 { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_1"; - /** Cluster topics compression type. */ - compressionType: CompressionType; - /** - * The number of messages accumulated on a log partition before messages are flushed to disk. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.flush_messages] setting. - */ - logFlushIntervalMessages?: number; - /** - * The maximum time (in milliseconds) that a message in any topic is kept in memory before flushed to disk. - * If not set, the value of [log_flush_scheduler_interval_ms] is used. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.flush_ms] setting. - */ - logFlushIntervalMs?: number; - /** - * The frequency of checks (in milliseconds) for any logs that need to be flushed to disk. - * This check is done by the log flusher. - */ - logFlushSchedulerIntervalMs?: number; - /** - * Partition size limit; Kafka will discard old log segments to free up space if `delete` [TopicConfig2_1.cleanup_policy] is in effect. 
- * This setting is helpful if you need to control the size of a log due to limited disk space. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.retention_bytes] setting. - */ - logRetentionBytes?: number; - /** The number of hours to keep a log segment file before deleting it. */ - logRetentionHours?: number; - /** - * The number of minutes to keep a log segment file before deleting it. - * - * If not set, the value of [log_retention_hours] is used. - */ - logRetentionMinutes?: number; - /** - * The number of milliseconds to keep a log segment file before deleting it. - * - * If not set, the value of [log_retention_minutes] is used. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.retention_ms] setting. - */ - logRetentionMs?: number; - /** - * The maximum size of a single log file. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.segment_bytes] setting. - */ - logSegmentBytes?: number; - /** - * Should pre allocate file when create new segment? - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_1.preallocate] setting. - */ - logPreallocate?: boolean; - /** The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ - socketSendBufferBytes?: number; - /** The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ - socketReceiveBufferBytes?: number; - /** Enable auto creation of topic on the server */ - autoCreateTopicsEnable?: boolean; - /** Default number of partitions per topic on the whole cluster */ - numPartitions?: number; - /** Default replication factor of the topic on the whole cluster */ - defaultReplicationFactor?: number; - /** The largest record batch size allowed by Kafka. Default value: 1048588. 
*/ - messageMaxBytes?: number; - /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ - replicaFetchMaxBytes?: number; - /** A list of cipher suites. */ - sslCipherSuites: string[]; - /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ - offsetsRetentionMinutes?: number; -} - -/** Kafka version 2.6 broker configuration. */ -export interface Kafkaconfig26 { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_6"; - /** Cluster topics compression type. */ - compressionType: CompressionType; - /** - * The number of messages accumulated on a log partition before messages are flushed to disk. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.flush_messages] setting. - */ - logFlushIntervalMessages?: number; - /** - * The maximum time (in milliseconds) that a message in any topic is kept in memory before flushed to disk. - * If not set, the value of [log_flush_scheduler_interval_ms] is used. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.flush_ms] setting. - */ - logFlushIntervalMs?: number; - /** - * The frequency of checks (in milliseconds) for any logs that need to be flushed to disk. - * This check is done by the log flusher. - */ - logFlushSchedulerIntervalMs?: number; - /** - * Partition size limit; Kafka will discard old log segments to free up space if `delete` [TopicConfig2_6.cleanup_policy] is in effect. - * This setting is helpful if you need to control the size of a log due to limited disk space. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.retention_bytes] setting. - */ - logRetentionBytes?: number; - /** The number of hours to keep a log segment file before deleting it. 
*/ - logRetentionHours?: number; - /** - * The number of minutes to keep a log segment file before deleting it. - * - * If not set, the value of [log_retention_hours] is used. - */ - logRetentionMinutes?: number; - /** - * The number of milliseconds to keep a log segment file before deleting it. - * - * If not set, the value of [log_retention_minutes] is used. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.retention_ms] setting. - */ - logRetentionMs?: number; - /** - * The maximum size of a single log file. - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.segment_bytes] setting. - */ - logSegmentBytes?: number; - /** - * Should pre allocate file when create new segment? - * - * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_6.preallocate] setting. - */ - logPreallocate?: boolean; - /** The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ - socketSendBufferBytes?: number; - /** The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ - socketReceiveBufferBytes?: number; - /** Enable auto creation of topic on the server */ - autoCreateTopicsEnable?: boolean; - /** Default number of partitions per topic on the whole cluster */ - numPartitions?: number; - /** Default replication factor of the topic on the whole cluster */ - defaultReplicationFactor?: number; - /** The largest record batch size allowed by Kafka. Default value: 1048588. */ - messageMaxBytes?: number; - /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ - replicaFetchMaxBytes?: number; - /** A list of cipher suites. */ - sslCipherSuites: string[]; - /** Offset storage time after a consumer group loses all its consumers. Default: 10080. 
*/ - offsetsRetentionMinutes?: number; -} - /** Kafka version 2.8 broker configuration. */ export interface Kafkaconfig28 { $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8"; @@ -534,6 +387,8 @@ export interface Kafkaconfig28 { sslCipherSuites: string[]; /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ offsetsRetentionMinutes?: number; + /** The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512]. */ + saslEnabledMechanisms: SaslMechanism[]; } /** Kafka version 3.x broker configuration. */ @@ -612,6 +467,8 @@ export interface KafkaConfig3 { sslCipherSuites: string[]; /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ offsetsRetentionMinutes?: number; + /** The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512]. */ + saslEnabledMechanisms: SaslMechanism[]; } /** Cluster host metadata. */ @@ -1288,6 +1145,12 @@ export const ConfigSpec = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(74).fork()).ldelim(); } + if (message.restApiConfig !== undefined) { + ConfigSpec_RestAPIConfig.encode( + message.restApiConfig, + writer.uint32(82).fork() + ).ldelim(); + } return writer; }, @@ -1332,6 +1195,12 @@ export const ConfigSpec = { case 9: message.access = Access.decode(reader, reader.uint32()); break; + case 10: + message.restApiConfig = ConfigSpec_RestAPIConfig.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1375,6 +1244,10 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.restApiConfig = + object.restApiConfig !== undefined && object.restApiConfig !== null + ? 
ConfigSpec_RestAPIConfig.fromJSON(object.restApiConfig) + : undefined; return message; }, @@ -1404,6 +1277,10 @@ export const ConfigSpec = { (obj.schemaRegistry = message.schemaRegistry); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.restApiConfig !== undefined && + (obj.restApiConfig = message.restApiConfig + ? ConfigSpec_RestAPIConfig.toJSON(message.restApiConfig) + : undefined); return obj; }, @@ -1429,6 +1306,10 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.restApiConfig = + object.restApiConfig !== undefined && object.restApiConfig !== null + ? ConfigSpec_RestAPIConfig.fromPartial(object.restApiConfig) + : undefined; return message; }, }; @@ -1449,18 +1330,6 @@ export const ConfigSpec_Kafka = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.kafkaConfig21 !== undefined) { - Kafkaconfig21.encode( - message.kafkaConfig21, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.kafkaConfig26 !== undefined) { - Kafkaconfig26.encode( - message.kafkaConfig26, - writer.uint32(26).fork() - ).ldelim(); - } if (message.kafkaConfig28 !== undefined) { Kafkaconfig28.encode( message.kafkaConfig28, @@ -1486,12 +1355,6 @@ export const ConfigSpec_Kafka = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.kafkaConfig21 = Kafkaconfig21.decode(reader, reader.uint32()); - break; - case 3: - message.kafkaConfig26 = Kafkaconfig26.decode(reader, reader.uint32()); - break; case 4: message.kafkaConfig28 = Kafkaconfig28.decode(reader, reader.uint32()); break; @@ -1512,14 +1375,6 @@ export const ConfigSpec_Kafka = { object.resources !== undefined && object.resources !== null ? 
Resources.fromJSON(object.resources) : undefined; - message.kafkaConfig21 = - object.kafkaConfig_2_1 !== undefined && object.kafkaConfig_2_1 !== null - ? Kafkaconfig21.fromJSON(object.kafkaConfig_2_1) - : undefined; - message.kafkaConfig26 = - object.kafkaConfig_2_6 !== undefined && object.kafkaConfig_2_6 !== null - ? Kafkaconfig26.fromJSON(object.kafkaConfig_2_6) - : undefined; message.kafkaConfig28 = object.kafkaConfig_2_8 !== undefined && object.kafkaConfig_2_8 !== null ? Kafkaconfig28.fromJSON(object.kafkaConfig_2_8) @@ -1537,14 +1392,6 @@ export const ConfigSpec_Kafka = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.kafkaConfig21 !== undefined && - (obj.kafkaConfig_2_1 = message.kafkaConfig21 - ? Kafkaconfig21.toJSON(message.kafkaConfig21) - : undefined); - message.kafkaConfig26 !== undefined && - (obj.kafkaConfig_2_6 = message.kafkaConfig26 - ? Kafkaconfig26.toJSON(message.kafkaConfig26) - : undefined); message.kafkaConfig28 !== undefined && (obj.kafkaConfig_2_8 = message.kafkaConfig28 ? Kafkaconfig28.toJSON(message.kafkaConfig28) @@ -1564,14 +1411,6 @@ export const ConfigSpec_Kafka = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.kafkaConfig21 = - object.kafkaConfig21 !== undefined && object.kafkaConfig21 !== null - ? Kafkaconfig21.fromPartial(object.kafkaConfig21) - : undefined; - message.kafkaConfig26 = - object.kafkaConfig26 !== undefined && object.kafkaConfig26 !== null - ? Kafkaconfig26.fromPartial(object.kafkaConfig26) - : undefined; message.kafkaConfig28 = object.kafkaConfig28 !== undefined && object.kafkaConfig28 !== null ? 
Kafkaconfig28.fromPartial(object.kafkaConfig28) @@ -1656,6 +1495,80 @@ export const ConfigSpec_Zookeeper = { messageTypeRegistry.set(ConfigSpec_Zookeeper.$type, ConfigSpec_Zookeeper); +const baseConfigSpec_RestAPIConfig: object = { + $type: "yandex.cloud.mdb.kafka.v1.ConfigSpec.RestAPIConfig", + enabled: false, +}; + +export const ConfigSpec_RestAPIConfig = { + $type: "yandex.cloud.mdb.kafka.v1.ConfigSpec.RestAPIConfig" as const, + + encode( + message: ConfigSpec_RestAPIConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ConfigSpec_RestAPIConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseConfigSpec_RestAPIConfig, + } as ConfigSpec_RestAPIConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigSpec_RestAPIConfig { + const message = { + ...baseConfigSpec_RestAPIConfig, + } as ConfigSpec_RestAPIConfig; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + return message; + }, + + toJSON(message: ConfigSpec_RestAPIConfig): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConfigSpec_RestAPIConfig { + const message = { + ...baseConfigSpec_RestAPIConfig, + } as ConfigSpec_RestAPIConfig; + message.enabled = object.enabled ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + ConfigSpec_RestAPIConfig.$type, + ConfigSpec_RestAPIConfig +); + const baseResources: object = { $type: "yandex.cloud.mdb.kafka.v1.Resources", resourcePresetId: "", @@ -1746,17 +1659,18 @@ export const Resources = { messageTypeRegistry.set(Resources.$type, Resources); -const baseKafkaconfig21: object = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_1", +const baseKafkaconfig28: object = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8", compressionType: 0, sslCipherSuites: "", + saslEnabledMechanisms: 0, }; -export const Kafkaconfig21 = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_1" as const, +export const Kafkaconfig28 = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8" as const, encode( - message: Kafkaconfig21, + message: Kafkaconfig28, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.compressionType !== 0) { @@ -1909,14 +1823,20 @@ export const Kafkaconfig21 = { writer.uint32(154).fork() ).ldelim(); } + writer.uint32(162).fork(); + for (const v of message.saslEnabledMechanisms) { + writer.int32(v); + } + writer.ldelim(); return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Kafkaconfig21 { + decode(input: _m0.Reader | Uint8Array, length?: number): Kafkaconfig28 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseKafkaconfig21 } as Kafkaconfig21; + const message = { ...baseKafkaconfig28 } as Kafkaconfig28; message.sslCipherSuites = []; + message.saslEnabledMechanisms = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2028,6 +1948,16 @@ export const Kafkaconfig21 = { reader.uint32() ).value; break; + case 20: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.saslEnabledMechanisms.push(reader.int32() as any); + } + } else { + message.saslEnabledMechanisms.push(reader.int32() as any); + } + break; default: reader.skipType(tag & 7); break; @@ -2036,8 +1966,8 @@ export const Kafkaconfig21 = { return message; }, - fromJSON(object: any): Kafkaconfig21 { - const message = { ...baseKafkaconfig21 } as Kafkaconfig21; + fromJSON(object: any): Kafkaconfig28 { + const message = { ...baseKafkaconfig28 } as Kafkaconfig28; message.compressionType = object.compressionType !== undefined && object.compressionType !== null ? compressionTypeFromJSON(object.compressionType) @@ -2125,10 +2055,13 @@ export const Kafkaconfig21 = { object.offsetsRetentionMinutes !== null ? Number(object.offsetsRetentionMinutes) : undefined; + message.saslEnabledMechanisms = (object.saslEnabledMechanisms ?? 
[]).map( + (e: any) => saslMechanismFromJSON(e) + ); return message; }, - toJSON(message: Kafkaconfig21): unknown { + toJSON(message: Kafkaconfig28): unknown { const obj: any = {}; message.compressionType !== undefined && (obj.compressionType = compressionTypeToJSON(message.compressionType)); @@ -2171,13 +2104,20 @@ export const Kafkaconfig21 = { } message.offsetsRetentionMinutes !== undefined && (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); + if (message.saslEnabledMechanisms) { + obj.saslEnabledMechanisms = message.saslEnabledMechanisms.map((e) => + saslMechanismToJSON(e) + ); + } else { + obj.saslEnabledMechanisms = []; + } return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Kafkaconfig21 { - const message = { ...baseKafkaconfig21 } as Kafkaconfig21; + ): Kafkaconfig28 { + const message = { ...baseKafkaconfig28 } as Kafkaconfig28; message.compressionType = object.compressionType ?? 0; message.logFlushIntervalMessages = object.logFlushIntervalMessages ?? undefined; @@ -2202,23 +2142,26 @@ export const Kafkaconfig21 = { message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; message.offsetsRetentionMinutes = object.offsetsRetentionMinutes ?? 
undefined; + message.saslEnabledMechanisms = + object.saslEnabledMechanisms?.map((e) => e) || []; return message; }, }; -messageTypeRegistry.set(Kafkaconfig21.$type, Kafkaconfig21); +messageTypeRegistry.set(Kafkaconfig28.$type, Kafkaconfig28); -const baseKafkaconfig26: object = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_6", +const baseKafkaConfig3: object = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3", compressionType: 0, sslCipherSuites: "", + saslEnabledMechanisms: 0, }; -export const Kafkaconfig26 = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_6" as const, +export const KafkaConfig3 = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3" as const, encode( - message: Kafkaconfig26, + message: KafkaConfig3, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.compressionType !== 0) { @@ -2371,14 +2314,20 @@ export const Kafkaconfig26 = { writer.uint32(154).fork() ).ldelim(); } + writer.uint32(162).fork(); + for (const v of message.saslEnabledMechanisms) { + writer.int32(v); + } + writer.ldelim(); return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Kafkaconfig26 { + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaConfig3 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseKafkaconfig26 } as Kafkaconfig26; + const message = { ...baseKafkaConfig3 } as KafkaConfig3; message.sslCipherSuites = []; + message.saslEnabledMechanisms = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2490,6 +2439,16 @@ export const Kafkaconfig26 = { reader.uint32() ).value; break; + case 20: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.saslEnabledMechanisms.push(reader.int32() as any); + } + } else { + message.saslEnabledMechanisms.push(reader.int32() as any); + } + break; default: reader.skipType(tag & 7); break; @@ -2498,8 +2457,8 @@ export const Kafkaconfig26 = { return message; }, - fromJSON(object: any): Kafkaconfig26 { - const message = { ...baseKafkaconfig26 } as Kafkaconfig26; + fromJSON(object: any): KafkaConfig3 { + const message = { ...baseKafkaConfig3 } as KafkaConfig3; message.compressionType = object.compressionType !== undefined && object.compressionType !== null ? compressionTypeFromJSON(object.compressionType) @@ -2587,10 +2546,13 @@ export const Kafkaconfig26 = { object.offsetsRetentionMinutes !== null ? Number(object.offsetsRetentionMinutes) : undefined; + message.saslEnabledMechanisms = (object.saslEnabledMechanisms ?? []).map( + (e: any) => saslMechanismFromJSON(e) + ); return message; }, - toJSON(message: Kafkaconfig26): unknown { + toJSON(message: KafkaConfig3): unknown { const obj: any = {}; message.compressionType !== undefined && (obj.compressionType = compressionTypeToJSON(message.compressionType)); @@ -2633,930 +2595,13 @@ export const Kafkaconfig26 = { } message.offsetsRetentionMinutes !== undefined && (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); - return obj; - }, - - fromPartial, I>>( - object: I - ): Kafkaconfig26 { - const message = { ...baseKafkaconfig26 } as Kafkaconfig26; - message.compressionType = object.compressionType ?? 
0; - message.logFlushIntervalMessages = - object.logFlushIntervalMessages ?? undefined; - message.logFlushIntervalMs = object.logFlushIntervalMs ?? undefined; - message.logFlushSchedulerIntervalMs = - object.logFlushSchedulerIntervalMs ?? undefined; - message.logRetentionBytes = object.logRetentionBytes ?? undefined; - message.logRetentionHours = object.logRetentionHours ?? undefined; - message.logRetentionMinutes = object.logRetentionMinutes ?? undefined; - message.logRetentionMs = object.logRetentionMs ?? undefined; - message.logSegmentBytes = object.logSegmentBytes ?? undefined; - message.logPreallocate = object.logPreallocate ?? undefined; - message.socketSendBufferBytes = object.socketSendBufferBytes ?? undefined; - message.socketReceiveBufferBytes = - object.socketReceiveBufferBytes ?? undefined; - message.autoCreateTopicsEnable = object.autoCreateTopicsEnable ?? undefined; - message.numPartitions = object.numPartitions ?? undefined; - message.defaultReplicationFactor = - object.defaultReplicationFactor ?? undefined; - message.messageMaxBytes = object.messageMaxBytes ?? undefined; - message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; - message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; - message.offsetsRetentionMinutes = - object.offsetsRetentionMinutes ?? 
undefined; - return message; - }, -}; - -messageTypeRegistry.set(Kafkaconfig26.$type, Kafkaconfig26); - -const baseKafkaconfig28: object = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8", - compressionType: 0, - sslCipherSuites: "", -}; - -export const Kafkaconfig28 = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8" as const, - - encode( - message: Kafkaconfig28, - writer: _m0.Writer = _m0.Writer.create() - ): _m0.Writer { - if (message.compressionType !== 0) { - writer.uint32(8).int32(message.compressionType); - } - if (message.logFlushIntervalMessages !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushIntervalMessages!, - }, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.logFlushIntervalMs !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushIntervalMs!, - }, - writer.uint32(26).fork() - ).ldelim(); - } - if (message.logFlushSchedulerIntervalMs !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushSchedulerIntervalMs!, - }, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.logRetentionBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionBytes!, - }, - writer.uint32(42).fork() - ).ldelim(); - } - if (message.logRetentionHours !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionHours!, - }, - writer.uint32(50).fork() - ).ldelim(); - } - if (message.logRetentionMinutes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionMinutes!, - }, - writer.uint32(58).fork() - ).ldelim(); - } - if (message.logRetentionMs !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.logRetentionMs! 
}, - writer.uint32(66).fork() - ).ldelim(); - } - if (message.logSegmentBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logSegmentBytes!, - }, - writer.uint32(74).fork() - ).ldelim(); - } - if (message.logPreallocate !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.logPreallocate! }, - writer.uint32(82).fork() - ).ldelim(); - } - if (message.socketSendBufferBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.socketSendBufferBytes!, - }, - writer.uint32(90).fork() - ).ldelim(); - } - if (message.socketReceiveBufferBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.socketReceiveBufferBytes!, - }, - writer.uint32(98).fork() - ).ldelim(); - } - if (message.autoCreateTopicsEnable !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.autoCreateTopicsEnable!, - }, - writer.uint32(106).fork() - ).ldelim(); - } - if (message.numPartitions !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.numPartitions! 
}, - writer.uint32(114).fork() - ).ldelim(); - } - if (message.defaultReplicationFactor !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.defaultReplicationFactor!, - }, - writer.uint32(122).fork() - ).ldelim(); - } - if (message.messageMaxBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.messageMaxBytes!, - }, - writer.uint32(130).fork() - ).ldelim(); - } - if (message.replicaFetchMaxBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.replicaFetchMaxBytes!, - }, - writer.uint32(138).fork() - ).ldelim(); - } - for (const v of message.sslCipherSuites) { - writer.uint32(146).string(v!); - } - if (message.offsetsRetentionMinutes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.offsetsRetentionMinutes!, - }, - writer.uint32(154).fork() - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Kafkaconfig28 { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseKafkaconfig28 } as Kafkaconfig28; - message.sslCipherSuites = []; - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.compressionType = reader.int32() as any; - break; - case 2: - message.logFlushIntervalMessages = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 3: - message.logFlushIntervalMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 4: - message.logFlushSchedulerIntervalMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 5: - message.logRetentionBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 6: - message.logRetentionHours = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 7: - message.logRetentionMinutes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 8: - message.logRetentionMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 9: - message.logSegmentBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 10: - message.logPreallocate = BoolValue.decode( - reader, - reader.uint32() - ).value; - break; - case 11: - message.socketSendBufferBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 12: - message.socketReceiveBufferBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 13: - message.autoCreateTopicsEnable = BoolValue.decode( - reader, - reader.uint32() - ).value; - break; - case 14: - message.numPartitions = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 15: - message.defaultReplicationFactor = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 16: - message.messageMaxBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 17: - message.replicaFetchMaxBytes = Int64Value.decode( - reader, - 
reader.uint32() - ).value; - break; - case 18: - message.sslCipherSuites.push(reader.string()); - break; - case 19: - message.offsetsRetentionMinutes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Kafkaconfig28 { - const message = { ...baseKafkaconfig28 } as Kafkaconfig28; - message.compressionType = - object.compressionType !== undefined && object.compressionType !== null - ? compressionTypeFromJSON(object.compressionType) - : 0; - message.logFlushIntervalMessages = - object.logFlushIntervalMessages !== undefined && - object.logFlushIntervalMessages !== null - ? Number(object.logFlushIntervalMessages) - : undefined; - message.logFlushIntervalMs = - object.logFlushIntervalMs !== undefined && - object.logFlushIntervalMs !== null - ? Number(object.logFlushIntervalMs) - : undefined; - message.logFlushSchedulerIntervalMs = - object.logFlushSchedulerIntervalMs !== undefined && - object.logFlushSchedulerIntervalMs !== null - ? Number(object.logFlushSchedulerIntervalMs) - : undefined; - message.logRetentionBytes = - object.logRetentionBytes !== undefined && - object.logRetentionBytes !== null - ? Number(object.logRetentionBytes) - : undefined; - message.logRetentionHours = - object.logRetentionHours !== undefined && - object.logRetentionHours !== null - ? Number(object.logRetentionHours) - : undefined; - message.logRetentionMinutes = - object.logRetentionMinutes !== undefined && - object.logRetentionMinutes !== null - ? Number(object.logRetentionMinutes) - : undefined; - message.logRetentionMs = - object.logRetentionMs !== undefined && object.logRetentionMs !== null - ? Number(object.logRetentionMs) - : undefined; - message.logSegmentBytes = - object.logSegmentBytes !== undefined && object.logSegmentBytes !== null - ? 
Number(object.logSegmentBytes) - : undefined; - message.logPreallocate = - object.logPreallocate !== undefined && object.logPreallocate !== null - ? Boolean(object.logPreallocate) - : undefined; - message.socketSendBufferBytes = - object.socketSendBufferBytes !== undefined && - object.socketSendBufferBytes !== null - ? Number(object.socketSendBufferBytes) - : undefined; - message.socketReceiveBufferBytes = - object.socketReceiveBufferBytes !== undefined && - object.socketReceiveBufferBytes !== null - ? Number(object.socketReceiveBufferBytes) - : undefined; - message.autoCreateTopicsEnable = - object.autoCreateTopicsEnable !== undefined && - object.autoCreateTopicsEnable !== null - ? Boolean(object.autoCreateTopicsEnable) - : undefined; - message.numPartitions = - object.numPartitions !== undefined && object.numPartitions !== null - ? Number(object.numPartitions) - : undefined; - message.defaultReplicationFactor = - object.defaultReplicationFactor !== undefined && - object.defaultReplicationFactor !== null - ? Number(object.defaultReplicationFactor) - : undefined; - message.messageMaxBytes = - object.messageMaxBytes !== undefined && object.messageMaxBytes !== null - ? Number(object.messageMaxBytes) - : undefined; - message.replicaFetchMaxBytes = - object.replicaFetchMaxBytes !== undefined && - object.replicaFetchMaxBytes !== null - ? Number(object.replicaFetchMaxBytes) - : undefined; - message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => - String(e) - ); - message.offsetsRetentionMinutes = - object.offsetsRetentionMinutes !== undefined && - object.offsetsRetentionMinutes !== null - ? 
Number(object.offsetsRetentionMinutes) - : undefined; - return message; - }, - - toJSON(message: Kafkaconfig28): unknown { - const obj: any = {}; - message.compressionType !== undefined && - (obj.compressionType = compressionTypeToJSON(message.compressionType)); - message.logFlushIntervalMessages !== undefined && - (obj.logFlushIntervalMessages = message.logFlushIntervalMessages); - message.logFlushIntervalMs !== undefined && - (obj.logFlushIntervalMs = message.logFlushIntervalMs); - message.logFlushSchedulerIntervalMs !== undefined && - (obj.logFlushSchedulerIntervalMs = message.logFlushSchedulerIntervalMs); - message.logRetentionBytes !== undefined && - (obj.logRetentionBytes = message.logRetentionBytes); - message.logRetentionHours !== undefined && - (obj.logRetentionHours = message.logRetentionHours); - message.logRetentionMinutes !== undefined && - (obj.logRetentionMinutes = message.logRetentionMinutes); - message.logRetentionMs !== undefined && - (obj.logRetentionMs = message.logRetentionMs); - message.logSegmentBytes !== undefined && - (obj.logSegmentBytes = message.logSegmentBytes); - message.logPreallocate !== undefined && - (obj.logPreallocate = message.logPreallocate); - message.socketSendBufferBytes !== undefined && - (obj.socketSendBufferBytes = message.socketSendBufferBytes); - message.socketReceiveBufferBytes !== undefined && - (obj.socketReceiveBufferBytes = message.socketReceiveBufferBytes); - message.autoCreateTopicsEnable !== undefined && - (obj.autoCreateTopicsEnable = message.autoCreateTopicsEnable); - message.numPartitions !== undefined && - (obj.numPartitions = message.numPartitions); - message.defaultReplicationFactor !== undefined && - (obj.defaultReplicationFactor = message.defaultReplicationFactor); - message.messageMaxBytes !== undefined && - (obj.messageMaxBytes = message.messageMaxBytes); - message.replicaFetchMaxBytes !== undefined && - (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); - if (message.sslCipherSuites) { - 
obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); - } else { - obj.sslCipherSuites = []; - } - message.offsetsRetentionMinutes !== undefined && - (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); - return obj; - }, - - fromPartial, I>>( - object: I - ): Kafkaconfig28 { - const message = { ...baseKafkaconfig28 } as Kafkaconfig28; - message.compressionType = object.compressionType ?? 0; - message.logFlushIntervalMessages = - object.logFlushIntervalMessages ?? undefined; - message.logFlushIntervalMs = object.logFlushIntervalMs ?? undefined; - message.logFlushSchedulerIntervalMs = - object.logFlushSchedulerIntervalMs ?? undefined; - message.logRetentionBytes = object.logRetentionBytes ?? undefined; - message.logRetentionHours = object.logRetentionHours ?? undefined; - message.logRetentionMinutes = object.logRetentionMinutes ?? undefined; - message.logRetentionMs = object.logRetentionMs ?? undefined; - message.logSegmentBytes = object.logSegmentBytes ?? undefined; - message.logPreallocate = object.logPreallocate ?? undefined; - message.socketSendBufferBytes = object.socketSendBufferBytes ?? undefined; - message.socketReceiveBufferBytes = - object.socketReceiveBufferBytes ?? undefined; - message.autoCreateTopicsEnable = object.autoCreateTopicsEnable ?? undefined; - message.numPartitions = object.numPartitions ?? undefined; - message.defaultReplicationFactor = - object.defaultReplicationFactor ?? undefined; - message.messageMaxBytes = object.messageMaxBytes ?? undefined; - message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; - message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; - message.offsetsRetentionMinutes = - object.offsetsRetentionMinutes ?? 
undefined; - return message; - }, -}; - -messageTypeRegistry.set(Kafkaconfig28.$type, Kafkaconfig28); - -const baseKafkaConfig3: object = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3", - compressionType: 0, - sslCipherSuites: "", -}; - -export const KafkaConfig3 = { - $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3" as const, - - encode( - message: KafkaConfig3, - writer: _m0.Writer = _m0.Writer.create() - ): _m0.Writer { - if (message.compressionType !== 0) { - writer.uint32(8).int32(message.compressionType); - } - if (message.logFlushIntervalMessages !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushIntervalMessages!, - }, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.logFlushIntervalMs !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushIntervalMs!, - }, - writer.uint32(26).fork() - ).ldelim(); - } - if (message.logFlushSchedulerIntervalMs !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logFlushSchedulerIntervalMs!, - }, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.logRetentionBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionBytes!, - }, - writer.uint32(42).fork() - ).ldelim(); - } - if (message.logRetentionHours !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionHours!, - }, - writer.uint32(50).fork() - ).ldelim(); - } - if (message.logRetentionMinutes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logRetentionMinutes!, - }, - writer.uint32(58).fork() - ).ldelim(); - } - if (message.logRetentionMs !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.logRetentionMs! 
}, - writer.uint32(66).fork() - ).ldelim(); - } - if (message.logSegmentBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.logSegmentBytes!, - }, - writer.uint32(74).fork() - ).ldelim(); - } - if (message.logPreallocate !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.logPreallocate! }, - writer.uint32(82).fork() - ).ldelim(); - } - if (message.socketSendBufferBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.socketSendBufferBytes!, - }, - writer.uint32(90).fork() - ).ldelim(); - } - if (message.socketReceiveBufferBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.socketReceiveBufferBytes!, - }, - writer.uint32(98).fork() - ).ldelim(); - } - if (message.autoCreateTopicsEnable !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.autoCreateTopicsEnable!, - }, - writer.uint32(106).fork() - ).ldelim(); - } - if (message.numPartitions !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.numPartitions! 
}, - writer.uint32(114).fork() - ).ldelim(); - } - if (message.defaultReplicationFactor !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.defaultReplicationFactor!, - }, - writer.uint32(122).fork() - ).ldelim(); - } - if (message.messageMaxBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.messageMaxBytes!, - }, - writer.uint32(130).fork() - ).ldelim(); - } - if (message.replicaFetchMaxBytes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.replicaFetchMaxBytes!, - }, - writer.uint32(138).fork() - ).ldelim(); - } - for (const v of message.sslCipherSuites) { - writer.uint32(146).string(v!); - } - if (message.offsetsRetentionMinutes !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.offsetsRetentionMinutes!, - }, - writer.uint32(154).fork() - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): KafkaConfig3 { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseKafkaConfig3 } as KafkaConfig3; - message.sslCipherSuites = []; - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.compressionType = reader.int32() as any; - break; - case 2: - message.logFlushIntervalMessages = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 3: - message.logFlushIntervalMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 4: - message.logFlushSchedulerIntervalMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 5: - message.logRetentionBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 6: - message.logRetentionHours = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 7: - message.logRetentionMinutes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 8: - message.logRetentionMs = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 9: - message.logSegmentBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 10: - message.logPreallocate = BoolValue.decode( - reader, - reader.uint32() - ).value; - break; - case 11: - message.socketSendBufferBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 12: - message.socketReceiveBufferBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 13: - message.autoCreateTopicsEnable = BoolValue.decode( - reader, - reader.uint32() - ).value; - break; - case 14: - message.numPartitions = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 15: - message.defaultReplicationFactor = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 16: - message.messageMaxBytes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 17: - message.replicaFetchMaxBytes = Int64Value.decode( - reader, - 
reader.uint32() - ).value; - break; - case 18: - message.sslCipherSuites.push(reader.string()); - break; - case 19: - message.offsetsRetentionMinutes = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): KafkaConfig3 { - const message = { ...baseKafkaConfig3 } as KafkaConfig3; - message.compressionType = - object.compressionType !== undefined && object.compressionType !== null - ? compressionTypeFromJSON(object.compressionType) - : 0; - message.logFlushIntervalMessages = - object.logFlushIntervalMessages !== undefined && - object.logFlushIntervalMessages !== null - ? Number(object.logFlushIntervalMessages) - : undefined; - message.logFlushIntervalMs = - object.logFlushIntervalMs !== undefined && - object.logFlushIntervalMs !== null - ? Number(object.logFlushIntervalMs) - : undefined; - message.logFlushSchedulerIntervalMs = - object.logFlushSchedulerIntervalMs !== undefined && - object.logFlushSchedulerIntervalMs !== null - ? Number(object.logFlushSchedulerIntervalMs) - : undefined; - message.logRetentionBytes = - object.logRetentionBytes !== undefined && - object.logRetentionBytes !== null - ? Number(object.logRetentionBytes) - : undefined; - message.logRetentionHours = - object.logRetentionHours !== undefined && - object.logRetentionHours !== null - ? Number(object.logRetentionHours) - : undefined; - message.logRetentionMinutes = - object.logRetentionMinutes !== undefined && - object.logRetentionMinutes !== null - ? Number(object.logRetentionMinutes) - : undefined; - message.logRetentionMs = - object.logRetentionMs !== undefined && object.logRetentionMs !== null - ? Number(object.logRetentionMs) - : undefined; - message.logSegmentBytes = - object.logSegmentBytes !== undefined && object.logSegmentBytes !== null - ? 
Number(object.logSegmentBytes) - : undefined; - message.logPreallocate = - object.logPreallocate !== undefined && object.logPreallocate !== null - ? Boolean(object.logPreallocate) - : undefined; - message.socketSendBufferBytes = - object.socketSendBufferBytes !== undefined && - object.socketSendBufferBytes !== null - ? Number(object.socketSendBufferBytes) - : undefined; - message.socketReceiveBufferBytes = - object.socketReceiveBufferBytes !== undefined && - object.socketReceiveBufferBytes !== null - ? Number(object.socketReceiveBufferBytes) - : undefined; - message.autoCreateTopicsEnable = - object.autoCreateTopicsEnable !== undefined && - object.autoCreateTopicsEnable !== null - ? Boolean(object.autoCreateTopicsEnable) - : undefined; - message.numPartitions = - object.numPartitions !== undefined && object.numPartitions !== null - ? Number(object.numPartitions) - : undefined; - message.defaultReplicationFactor = - object.defaultReplicationFactor !== undefined && - object.defaultReplicationFactor !== null - ? Number(object.defaultReplicationFactor) - : undefined; - message.messageMaxBytes = - object.messageMaxBytes !== undefined && object.messageMaxBytes !== null - ? Number(object.messageMaxBytes) - : undefined; - message.replicaFetchMaxBytes = - object.replicaFetchMaxBytes !== undefined && - object.replicaFetchMaxBytes !== null - ? Number(object.replicaFetchMaxBytes) - : undefined; - message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => - String(e) - ); - message.offsetsRetentionMinutes = - object.offsetsRetentionMinutes !== undefined && - object.offsetsRetentionMinutes !== null - ? 
Number(object.offsetsRetentionMinutes) - : undefined; - return message; - }, - - toJSON(message: KafkaConfig3): unknown { - const obj: any = {}; - message.compressionType !== undefined && - (obj.compressionType = compressionTypeToJSON(message.compressionType)); - message.logFlushIntervalMessages !== undefined && - (obj.logFlushIntervalMessages = message.logFlushIntervalMessages); - message.logFlushIntervalMs !== undefined && - (obj.logFlushIntervalMs = message.logFlushIntervalMs); - message.logFlushSchedulerIntervalMs !== undefined && - (obj.logFlushSchedulerIntervalMs = message.logFlushSchedulerIntervalMs); - message.logRetentionBytes !== undefined && - (obj.logRetentionBytes = message.logRetentionBytes); - message.logRetentionHours !== undefined && - (obj.logRetentionHours = message.logRetentionHours); - message.logRetentionMinutes !== undefined && - (obj.logRetentionMinutes = message.logRetentionMinutes); - message.logRetentionMs !== undefined && - (obj.logRetentionMs = message.logRetentionMs); - message.logSegmentBytes !== undefined && - (obj.logSegmentBytes = message.logSegmentBytes); - message.logPreallocate !== undefined && - (obj.logPreallocate = message.logPreallocate); - message.socketSendBufferBytes !== undefined && - (obj.socketSendBufferBytes = message.socketSendBufferBytes); - message.socketReceiveBufferBytes !== undefined && - (obj.socketReceiveBufferBytes = message.socketReceiveBufferBytes); - message.autoCreateTopicsEnable !== undefined && - (obj.autoCreateTopicsEnable = message.autoCreateTopicsEnable); - message.numPartitions !== undefined && - (obj.numPartitions = message.numPartitions); - message.defaultReplicationFactor !== undefined && - (obj.defaultReplicationFactor = message.defaultReplicationFactor); - message.messageMaxBytes !== undefined && - (obj.messageMaxBytes = message.messageMaxBytes); - message.replicaFetchMaxBytes !== undefined && - (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); - if (message.sslCipherSuites) { - 
obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); + if (message.saslEnabledMechanisms) { + obj.saslEnabledMechanisms = message.saslEnabledMechanisms.map((e) => + saslMechanismToJSON(e) + ); } else { - obj.sslCipherSuites = []; + obj.saslEnabledMechanisms = []; } - message.offsetsRetentionMinutes !== undefined && - (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); return obj; }, @@ -3588,6 +2633,8 @@ export const KafkaConfig3 = { message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; message.offsetsRetentionMinutes = object.offsetsRetentionMinutes ?? undefined; + message.saslEnabledMechanisms = + object.saslEnabledMechanisms?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/common.ts b/src/generated/yandex/cloud/mdb/kafka/v1/common.ts index 70b5d540..f558d2ac 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/common.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/common.ts @@ -72,6 +72,44 @@ export function compressionTypeToJSON(object: CompressionType): string { } } +export enum SaslMechanism { + SASL_MECHANISM_UNSPECIFIED = 0, + SASL_MECHANISM_SCRAM_SHA_256 = 1, + SASL_MECHANISM_SCRAM_SHA_512 = 2, + UNRECOGNIZED = -1, +} + +export function saslMechanismFromJSON(object: any): SaslMechanism { + switch (object) { + case 0: + case "SASL_MECHANISM_UNSPECIFIED": + return SaslMechanism.SASL_MECHANISM_UNSPECIFIED; + case 1: + case "SASL_MECHANISM_SCRAM_SHA_256": + return SaslMechanism.SASL_MECHANISM_SCRAM_SHA_256; + case 2: + case "SASL_MECHANISM_SCRAM_SHA_512": + return SaslMechanism.SASL_MECHANISM_SCRAM_SHA_512; + case -1: + case "UNRECOGNIZED": + default: + return SaslMechanism.UNRECOGNIZED; + } +} + +export function saslMechanismToJSON(object: SaslMechanism): string { + switch (object) { + case SaslMechanism.SASL_MECHANISM_UNSPECIFIED: + return "SASL_MECHANISM_UNSPECIFIED"; + case SaslMechanism.SASL_MECHANISM_SCRAM_SHA_256: + return "SASL_MECHANISM_SCRAM_SHA_256"; + case 
SaslMechanism.SASL_MECHANISM_SCRAM_SHA_512: + return "SASL_MECHANISM_SCRAM_SHA_512"; + default: + return "UNKNOWN"; + } +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts b/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts index 3c9e29b6..a9aca345 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts @@ -29,7 +29,9 @@ export interface Topic { partitions?: number; /** Amount of data copies (replicas) for the topic in the cluster. */ replicationFactor?: number; + /** @deprecated */ topicConfig21?: Topicconfig21 | undefined; + /** @deprecated */ topicConfig26?: Topicconfig26 | undefined; topicConfig28?: Topicconfig28 | undefined; topicConfig3?: TopicConfig3 | undefined; @@ -43,13 +45,19 @@ export interface TopicSpec { partitions?: number; /** Amount of copies of a topic data kept in the cluster. */ replicationFactor?: number; + /** @deprecated */ topicConfig21?: Topicconfig21 | undefined; + /** @deprecated */ topicConfig26?: Topicconfig26 | undefined; topicConfig28?: Topicconfig28 | undefined; topicConfig3?: TopicConfig3 | undefined; } -/** A topic settings for 2.1. */ +/** + * Deprecated. Version `2.1` of Kafka not supported in Yandex Cloud. + * + * @deprecated + */ export interface Topicconfig21 { $type: "yandex.cloud.mdb.kafka.v1.TopicConfig2_1"; /** Retention policy to use on old log messages. */ @@ -160,7 +168,11 @@ export function topicconfig21_CleanupPolicyToJSON( } } -/** A topic settings for 2.6 */ +/** + * Deprecated. Version `2.6` of Kafka not supported in Yandex Cloud. + * + * @deprecated + */ export interface Topicconfig26 { $type: "yandex.cloud.mdb.kafka.v1.TopicConfig2_6"; /** Retention policy to use on old log messages. 
*/ diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/user.ts b/src/generated/yandex/cloud/mdb/kafka/v1/user.ts index 4dc9e1f3..40acaa2f 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/user.ts @@ -43,6 +43,15 @@ export interface Permission { topicName: string; /** Access role type to grant to the user. */ role: Permission_AccessRole; + /** + * Lists hosts allowed for this permission. + * When not defined, access from any host is allowed. + * + * Bare in mind that the same host might appear in multiple permissions at the same time, + * hence removing individual permission doesn't automatically restricts access from the [allow_hosts] of the permission. + * If the same host(s) is listed for another permission of the same principal/topic, the host(s) remains allowed. + */ + allowHosts: string[]; } export enum Permission_AccessRole { @@ -281,6 +290,7 @@ const basePermission: object = { $type: "yandex.cloud.mdb.kafka.v1.Permission", topicName: "", role: 0, + allowHosts: "", }; export const Permission = { @@ -296,6 +306,9 @@ export const Permission = { if (message.role !== 0) { writer.uint32(16).int32(message.role); } + for (const v of message.allowHosts) { + writer.uint32(34).string(v!); + } return writer; }, @@ -303,6 +316,7 @@ export const Permission = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...basePermission } as Permission; + message.allowHosts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -312,6 +326,9 @@ export const Permission = { case 2: message.role = reader.int32() as any; break; + case 4: + message.allowHosts.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -330,6 +347,7 @@ export const Permission = { object.role !== undefined && object.role !== null ? 
permission_AccessRoleFromJSON(object.role) : 0; + message.allowHosts = (object.allowHosts ?? []).map((e: any) => String(e)); return message; }, @@ -338,6 +356,11 @@ export const Permission = { message.topicName !== undefined && (obj.topicName = message.topicName); message.role !== undefined && (obj.role = permission_AccessRoleToJSON(message.role)); + if (message.allowHosts) { + obj.allowHosts = message.allowHosts.map((e) => e); + } else { + obj.allowHosts = []; + } return obj; }, @@ -347,6 +370,7 @@ export const Permission = { const message = { ...basePermission } as Permission; message.topicName = object.topicName ?? ""; message.role = object.role ?? 0; + message.allowHosts = object.allowHosts?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts index f0babe72..470e230e 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts @@ -23,6 +23,54 @@ export interface Backup { sourceClusterId: string; /** Start timestamp (the time when the backup operation was started). */ startedAt?: Date; + /** Size of backup, in bytes */ + size: number; + /** How this backup was created (manual/automatic/etc...) 
*/ + type: Backup_BackupCreationType; +} + +export enum Backup_BackupCreationType { + BACKUP_CREATION_TYPE_UNSPECIFIED = 0, + /** AUTOMATED - Backup created by automated daily schedule */ + AUTOMATED = 1, + /** MANUAL - Backup created by user request */ + MANUAL = 2, + UNRECOGNIZED = -1, +} + +export function backup_BackupCreationTypeFromJSON( + object: any +): Backup_BackupCreationType { + switch (object) { + case 0: + case "BACKUP_CREATION_TYPE_UNSPECIFIED": + return Backup_BackupCreationType.BACKUP_CREATION_TYPE_UNSPECIFIED; + case 1: + case "AUTOMATED": + return Backup_BackupCreationType.AUTOMATED; + case 2: + case "MANUAL": + return Backup_BackupCreationType.MANUAL; + case -1: + case "UNRECOGNIZED": + default: + return Backup_BackupCreationType.UNRECOGNIZED; + } +} + +export function backup_BackupCreationTypeToJSON( + object: Backup_BackupCreationType +): string { + switch (object) { + case Backup_BackupCreationType.BACKUP_CREATION_TYPE_UNSPECIFIED: + return "BACKUP_CREATION_TYPE_UNSPECIFIED"; + case Backup_BackupCreationType.AUTOMATED: + return "AUTOMATED"; + case Backup_BackupCreationType.MANUAL: + return "MANUAL"; + default: + return "UNKNOWN"; + } } const baseBackup: object = { @@ -30,6 +78,8 @@ const baseBackup: object = { id: "", folderId: "", sourceClusterId: "", + size: 0, + type: 0, }; export const Backup = { @@ -60,6 +110,12 @@ export const Backup = { writer.uint32(42).fork() ).ldelim(); } + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } + if (message.type !== 0) { + writer.uint32(56).int32(message.type); + } return writer; }, @@ -89,6 +145,12 @@ export const Backup = { Timestamp.decode(reader, reader.uint32()) ); break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; + case 7: + message.type = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -117,6 +179,14 @@ export const Backup = { object.startedAt !== undefined && object.startedAt !== null ? 
fromJsonTimestamp(object.startedAt) : undefined; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.type = + object.type !== undefined && object.type !== null + ? backup_BackupCreationTypeFromJSON(object.type) + : 0; return message; }, @@ -130,6 +200,9 @@ export const Backup = { (obj.sourceClusterId = message.sourceClusterId); message.startedAt !== undefined && (obj.startedAt = message.startedAt.toISOString()); + message.size !== undefined && (obj.size = Math.round(message.size)); + message.type !== undefined && + (obj.type = backup_BackupCreationTypeToJSON(message.type)); return obj; }, @@ -140,12 +213,25 @@ export const Backup = { message.createdAt = object.createdAt ?? undefined; message.sourceClusterId = object.sourceClusterId ?? ""; message.startedAt = object.startedAt ?? undefined; + message.size = object.size ?? 0; + message.type = object.type ?? 0; return message; }, }; messageTypeRegistry.set(Backup.$type, Backup); +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -195,6 +281,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts index 44a19d46..bcf4b1c7 100644 --- 
a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts @@ -64,6 +64,20 @@ export interface ListBackupsResponse { nextPageToken: string; } +export interface DeleteBackupRequest { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupRequest"; + /** Required. ID of the backup to delete. */ + backupId: string; +} + +export interface DeleteBackupMetadata { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupMetadata"; + /** Required. ID of the MySQL backup that is currently being deleted. */ + backupId: string; + /** ID of the MySQL backup that is being deleted. */ + clusterId: string; +} + const baseGetBackupRequest: object = { $type: "yandex.cloud.mdb.mysql.v1.GetBackupRequest", backupId: "", @@ -296,6 +310,146 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); +const baseDeleteBackupRequest: object = { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupRequest", + backupId: "", +}; + +export const DeleteBackupRequest = { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupRequest" as const, + + encode( + message: DeleteBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupRequest.$type, DeleteBackupRequest); + +const baseDeleteBackupMetadata: object = { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupMetadata", + backupId: "", + clusterId: "", +}; + +export const DeleteBackupMetadata = { + $type: "yandex.cloud.mdb.mysql.v1.DeleteBackupMetadata" as const, + + encode( + message: DeleteBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupMetadata): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = object.backupId ?? ""; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupMetadata.$type, DeleteBackupMetadata); + /** * A set of methods for managing MySQL backups. * diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts index faba6f10..8f1f2975 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts @@ -352,6 +352,8 @@ export enum Host_Health { DEAD = 2, /** DEGRADED - Host is degraded, and can perform only some of its essential functions. */ DEGRADED = 3, + /** READONLY - Host is alive, but in read-only mode. 
*/ + READONLY = 4, UNRECOGNIZED = -1, } @@ -369,6 +371,9 @@ export function host_HealthFromJSON(object: any): Host_Health { case 3: case "DEGRADED": return Host_Health.DEGRADED; + case 4: + case "READONLY": + return Host_Health.READONLY; case -1: case "UNRECOGNIZED": default: @@ -386,6 +391,8 @@ export function host_HealthToJSON(object: Host_Health): string { return "DEAD"; case Host_Health.DEGRADED: return "DEGRADED"; + case Host_Health.READONLY: + return "READONLY"; default: return "UNKNOWN"; } @@ -439,6 +446,8 @@ export enum Service_Health { ALIVE = 1, /** DEAD - The service is dead or unresponsive. */ DEAD = 2, + /** READONLY - The service is in read-only mode. */ + READONLY = 3, UNRECOGNIZED = -1, } @@ -453,6 +462,9 @@ export function service_HealthFromJSON(object: any): Service_Health { case 2: case "DEAD": return Service_Health.DEAD; + case 3: + case "READONLY": + return Service_Health.READONLY; case -1: case "UNRECOGNIZED": default: @@ -468,6 +480,8 @@ export function service_HealthToJSON(object: Service_Health): string { return "ALIVE"; case Service_Health.DEAD: return "DEAD"; + case Service_Health.READONLY: + return "READONLY"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts index 02c9fdb6..cec7e21f 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts @@ -214,6 +214,8 @@ export interface BackupClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterMetadata"; /** ID of the cluster that is being backed up. */ clusterId: string; + /** ID of the MySQL backup that is created. 
*/ + backupId: string; } export interface RestoreClusterRequest { @@ -2181,6 +2183,7 @@ messageTypeRegistry.set(BackupClusterRequest.$type, BackupClusterRequest); const baseBackupClusterMetadata: object = { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterMetadata", clusterId: "", + backupId: "", }; export const BackupClusterMetadata = { @@ -2193,6 +2196,9 @@ export const BackupClusterMetadata = { if (message.clusterId !== "") { writer.uint32(10).string(message.clusterId); } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } return writer; }, @@ -2209,6 +2215,9 @@ export const BackupClusterMetadata = { case 1: message.clusterId = reader.string(); break; + case 2: + message.backupId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -2223,12 +2232,17 @@ export const BackupClusterMetadata = { object.clusterId !== undefined && object.clusterId !== null ? String(object.clusterId) : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; return message; }, toJSON(message: BackupClusterMetadata): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); return obj; }, @@ -2237,6 +2251,7 @@ export const BackupClusterMetadata = { ): BackupClusterMetadata { const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts index 7fff574c..a6ed8dcb 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts @@ -40,7 +40,7 @@ export interface Mysqlconfig57 { /** * Enable writing of audit log of MySQL. 
* - * See [MySQL documentation](https://dev.mysql.com/doc/mysql-security-excerpt/5.6/en/audit-log-options-variables.html#option_mysqld_audit-log) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/mysql-security-excerpt/5.7/en/audit-log-reference.html#audit-log-options-variables) for details. */ auditLog?: boolean; /** @@ -58,145 +58,145 @@ export interface Mysqlconfig57 { /** * Authentication plugin used in the managed MySQL cluster. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin) for details. */ defaultAuthenticationPlugin: Mysqlconfig57_AuthPlugin; /** * Transaction log flush behaviour. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) for details. */ innodbFlushLogAtTrxCommit?: number; /** * Max time in seconds for a transaction to wait for a row lock. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation) for details. 
*/ transactionIsolation: Mysqlconfig57_TransactionIsolation; /** * Print information about deadlocks in error log. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout) for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout) for details. */ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len) for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size) for details. 
*/ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size) for details. */ maxHeapTableSize?: number; /** * The servers default time zone. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone) for details. */ defaultTimeZone: string; /** * The servers default character set. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server) for details. */ characterSetServer: string; /** * The server default collation. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server) for details. */ collationServer: string; /** * Enables InnoDB adaptive hash index. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) for details. */ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. 
* - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave) for details. */ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) for details. */ innodbLogBufferSize?: number; /** * The size in bytes of the single InnoDB Redo log file. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size) for details. */ innodbLogFileSize?: number; /** * Limits IO available for InnoDB background tasks. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity) for details. */ innodbIoCapacity?: number; /** * Limits IO available for InnoDB background tasks. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads for details. 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads) for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads) for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads) for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) for details. */ innodbThreadConcurrency?: number; /** * Limits the max size of InnoDB temp tablespace. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) for details. */ innodbTempDataFileMaxSize?: number; /** @@ -250,13 +250,13 @@ export interface Mysqlconfig57 { /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_increment) for details. 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-source.html#sysvar_auto_increment_increment) for details. */ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_offset) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-source.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** @@ -292,7 +292,7 @@ export interface Mysqlconfig57 { /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-source.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. */ rplSemiSyncMasterWaitForSlaveCount?: number; /** @@ -426,9 +426,94 @@ export interface Mysqlconfig57 { /** * Specifies how the source mysqld generates the dependency information that it writes in the binary log to help replicas determine which transactions can be executed in parallel. * - * For details, see [MySQL documentation for the variabl](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). */ binlogTransactionDependencyTracking: Mysqlconfig57_BinlogTransactionDependencyTracking; + /** + * Config specific will be all changes to a table take effect immediately or you must use COMMIT to accept a transaction or ROLLBACK to cancel it. 
+ * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_autocommit). + */ + autocommit?: boolean; + /** + * Enables or disables periodic output for the standard InnoDB Monitor. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_status_output). + */ + innodbStatusOutput?: boolean; + /** + * When innodb_strict_mode is enabled, InnoDB returns errors rather than warnings when checking for invalid or incompatible table options. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_strict_mode). + */ + innodbStrictMode?: boolean; + /** + * Makes InnoDB to write information about all lock wait timeout errors into the log file. + * + * For details, see [Percona documentation for the variable](https://docs.percona.com/percona-server/5.7/diagnostics/innodb_show_status.html?highlight=innodb_print_lock_wait_timeout_info). + */ + innodbPrintLockWaitTimeoutInfo?: boolean; + /** + * System variable specifies the verbosity for handling events intended for the error log + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_log_error_verbosity). + */ + logErrorVerbosity?: number; + /** + * The maximum number of bytes of memory reserved per session for computation of normalized statement digests. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_digest_length). + */ + maxDigestLength?: number; + /** + * Do not cache results that are larger than this number of bytes. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_query_cache_limit). 
+ */ + queryCacheLimit?: number; + /** + * The amount of memory allocated for caching query results. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_query_cache_size). + */ + queryCacheSize?: number; + /** + * Set the query cache type. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_query_cache_type). + */ + queryCacheType?: number; + /** + * // This variable specifies the timeout in seconds for attempts to acquire metadata locks + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_lock_wait_timeout). + */ + lockWaitTimeout?: number; + /** + * This variable limits the total number of prepared statements in the server. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_prepared_stmt_count). + */ + maxPreparedStmtCount?: number; + /** + * The system variable enables control over optimizer behavior. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_optimizer_switch) + * https://dev.mysql.com/doc/refman/5.7/en/switchable-optimizations.html + */ + optimizerSwitch: string; + /** + * The maximum depth of search performed by the query optimizer + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html) + */ + optimizerSearchDepth?: number; + /** + * Enables and disables collection of query times + * + * For details, see [Percona documentation for the variable](https://docs.percona.com/percona-server/5.7/diagnostics/response_time_distribution.html#query_response_time_stats). 
+ */ + queryResponseTimeStats?: boolean; } export enum Mysqlconfig57_SQLMode { @@ -1018,6 +1103,7 @@ const baseMysqlconfig57: object = { logSlowRateType: 0, logSlowFilter: 0, binlogTransactionDependencyTracking: 0, + optimizerSwitch: "", }; export const Mysqlconfig57 = { @@ -1562,6 +1648,117 @@ export const Mysqlconfig57 = { if (message.binlogTransactionDependencyTracking !== 0) { writer.uint32(568).int32(message.binlogTransactionDependencyTracking); } + if (message.autocommit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.autocommit! }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.innodbStatusOutput !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbStatusOutput!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.innodbStrictMode !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbStrictMode!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.innodbPrintLockWaitTimeoutInfo !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbPrintLockWaitTimeoutInfo!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logErrorVerbosity!, + }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.maxDigestLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDigestLength!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.queryCacheLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.queryCacheLimit!, + }, + writer.uint32(626).fork() + ).ldelim(); + } + if (message.queryCacheSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.queryCacheSize! 
}, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.queryCacheType !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.queryCacheType! }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.lockWaitTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lockWaitTimeout!, + }, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.maxPreparedStmtCount !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedStmtCount!, + }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.optimizerSwitch !== "") { + writer.uint32(666).string(message.optimizerSwitch); + } + if (message.optimizerSearchDepth !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.optimizerSearchDepth!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.queryResponseTimeStats !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.queryResponseTimeStats!, + }, + writer.uint32(682).fork() + ).ldelim(); + } return writer; }, @@ -1972,6 +2169,84 @@ export const Mysqlconfig57 = { case 71: message.binlogTransactionDependencyTracking = reader.int32() as any; break; + case 72: + message.autocommit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 73: + message.innodbStatusOutput = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.innodbStrictMode = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.innodbPrintLockWaitTimeoutInfo = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.logErrorVerbosity = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.maxDigestLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 78: + message.queryCacheLimit = 
Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.queryCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.queryCacheType = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.lockWaitTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.maxPreparedStmtCount = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.optimizerSwitch = reader.string(); + break; + case 84: + message.optimizerSearchDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.queryResponseTimeStats = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2312,6 +2587,68 @@ export const Mysqlconfig57 = { object.binlogTransactionDependencyTracking ) : 0; + message.autocommit = + object.autocommit !== undefined && object.autocommit !== null + ? Boolean(object.autocommit) + : undefined; + message.innodbStatusOutput = + object.innodbStatusOutput !== undefined && + object.innodbStatusOutput !== null + ? Boolean(object.innodbStatusOutput) + : undefined; + message.innodbStrictMode = + object.innodbStrictMode !== undefined && object.innodbStrictMode !== null + ? Boolean(object.innodbStrictMode) + : undefined; + message.innodbPrintLockWaitTimeoutInfo = + object.innodbPrintLockWaitTimeoutInfo !== undefined && + object.innodbPrintLockWaitTimeoutInfo !== null + ? Boolean(object.innodbPrintLockWaitTimeoutInfo) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? Number(object.logErrorVerbosity) + : undefined; + message.maxDigestLength = + object.maxDigestLength !== undefined && object.maxDigestLength !== null + ? 
Number(object.maxDigestLength) + : undefined; + message.queryCacheLimit = + object.queryCacheLimit !== undefined && object.queryCacheLimit !== null + ? Number(object.queryCacheLimit) + : undefined; + message.queryCacheSize = + object.queryCacheSize !== undefined && object.queryCacheSize !== null + ? Number(object.queryCacheSize) + : undefined; + message.queryCacheType = + object.queryCacheType !== undefined && object.queryCacheType !== null + ? Number(object.queryCacheType) + : undefined; + message.lockWaitTimeout = + object.lockWaitTimeout !== undefined && object.lockWaitTimeout !== null + ? Number(object.lockWaitTimeout) + : undefined; + message.maxPreparedStmtCount = + object.maxPreparedStmtCount !== undefined && + object.maxPreparedStmtCount !== null + ? Number(object.maxPreparedStmtCount) + : undefined; + message.optimizerSwitch = + object.optimizerSwitch !== undefined && object.optimizerSwitch !== null + ? String(object.optimizerSwitch) + : ""; + message.optimizerSearchDepth = + object.optimizerSearchDepth !== undefined && + object.optimizerSearchDepth !== null + ? Number(object.optimizerSearchDepth) + : undefined; + message.queryResponseTimeStats = + object.queryResponseTimeStats !== undefined && + object.queryResponseTimeStats !== null + ? 
Boolean(object.queryResponseTimeStats) + : undefined; return message; }, @@ -2478,6 +2815,34 @@ export const Mysqlconfig57 = { mysqlconfig57_BinlogTransactionDependencyTrackingToJSON( message.binlogTransactionDependencyTracking )); + message.autocommit !== undefined && (obj.autocommit = message.autocommit); + message.innodbStatusOutput !== undefined && + (obj.innodbStatusOutput = message.innodbStatusOutput); + message.innodbStrictMode !== undefined && + (obj.innodbStrictMode = message.innodbStrictMode); + message.innodbPrintLockWaitTimeoutInfo !== undefined && + (obj.innodbPrintLockWaitTimeoutInfo = + message.innodbPrintLockWaitTimeoutInfo); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = message.logErrorVerbosity); + message.maxDigestLength !== undefined && + (obj.maxDigestLength = message.maxDigestLength); + message.queryCacheLimit !== undefined && + (obj.queryCacheLimit = message.queryCacheLimit); + message.queryCacheSize !== undefined && + (obj.queryCacheSize = message.queryCacheSize); + message.queryCacheType !== undefined && + (obj.queryCacheType = message.queryCacheType); + message.lockWaitTimeout !== undefined && + (obj.lockWaitTimeout = message.lockWaitTimeout); + message.maxPreparedStmtCount !== undefined && + (obj.maxPreparedStmtCount = message.maxPreparedStmtCount); + message.optimizerSwitch !== undefined && + (obj.optimizerSwitch = message.optimizerSwitch); + message.optimizerSearchDepth !== undefined && + (obj.optimizerSearchDepth = message.optimizerSearchDepth); + message.queryResponseTimeStats !== undefined && + (obj.queryResponseTimeStats = message.queryResponseTimeStats); return obj; }, @@ -2574,6 +2939,21 @@ export const Mysqlconfig57 = { message.innodbCompressionLevel = object.innodbCompressionLevel ?? undefined; message.binlogTransactionDependencyTracking = object.binlogTransactionDependencyTracking ?? 0; + message.autocommit = object.autocommit ?? undefined; + message.innodbStatusOutput = object.innodbStatusOutput ?? 
undefined; + message.innodbStrictMode = object.innodbStrictMode ?? undefined; + message.innodbPrintLockWaitTimeoutInfo = + object.innodbPrintLockWaitTimeoutInfo ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? undefined; + message.maxDigestLength = object.maxDigestLength ?? undefined; + message.queryCacheLimit = object.queryCacheLimit ?? undefined; + message.queryCacheSize = object.queryCacheSize ?? undefined; + message.queryCacheType = object.queryCacheType ?? undefined; + message.lockWaitTimeout = object.lockWaitTimeout ?? undefined; + message.maxPreparedStmtCount = object.maxPreparedStmtCount ?? undefined; + message.optimizerSwitch = object.optimizerSwitch ?? ""; + message.optimizerSearchDepth = object.optimizerSearchDepth ?? undefined; + message.queryResponseTimeStats = object.queryResponseTimeStats ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts index 86781b6c..bf0064df 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts @@ -58,145 +58,145 @@ export interface Mysqlconfig80 { /** * Authentication plugin used in the managed MySQL cluster. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin) for details. */ defaultAuthenticationPlugin: Mysqlconfig80_AuthPlugin; /** * Transaction log flush behaviour. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) for details. 
*/ innodbFlushLogAtTrxCommit?: number; /** * Max time in seconds for a transaction to wait for a row lock. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation) for details. */ transactionIsolation: Mysqlconfig80_TransactionIsolation; /** * Print information about deadlocks in error log. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout) for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout) for details. 
*/ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len) for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size) for details. */ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size) for details. */ maxHeapTableSize?: number; /** * The servers default time zone. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone) for details. */ defaultTimeZone: string; /** * The servers default character set. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server) for details. */ characterSetServer: string; /** * The server default collation. 
* - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server) for details. */ collationServer: string; /** * Enables InnoDB adaptive hash index. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) for details. */ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave) for details. */ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) for details. */ innodbLogBufferSize?: number; /** * The size in bytes of the single InnoDB Redo log file. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size) for details. */ innodbLogFileSize?: number; /** * Limits IO available for InnoDB background tasks. 
* - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity) for details. */ innodbIoCapacity?: number; /** * Limits IO available for InnoDB background tasks. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads) for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads) for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads) for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency for details. 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) for details. */ innodbThreadConcurrency?: number; /** * Limits the max size of InnoDB temp tablespace. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) for details. */ innodbTempDataFileMaxSize?: number; /** @@ -250,13 +250,13 @@ export interface Mysqlconfig80 { /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-source.html#sysvar_auto_increment_increment) for details. */ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_offset) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-source.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** @@ -292,7 +292,7 @@ export interface Mysqlconfig80 { /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-source.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. 
*/ rplSemiSyncMasterWaitForSlaveCount?: number; /** @@ -426,9 +426,70 @@ export interface Mysqlconfig80 { /** * Specifies how the source mysqld generates the dependency information that it writes in the binary log to help replicas determine which transactions can be executed in parallel. * - * For details, see [MySQL documentation for the variabl](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). */ binlogTransactionDependencyTracking: Mysqlconfig80_BinlogTransactionDependencyTracking; + /** + * Specifies whether all changes to a table take effect immediately or you must use COMMIT to accept a transaction or ROLLBACK to cancel it. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_autocommit). + */ + autocommit?: boolean; + /** + * Enables or disables periodic output for the standard InnoDB Monitor. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_status_output). + */ + innodbStatusOutput?: boolean; + /** + * When innodb_strict_mode is enabled, InnoDB returns errors rather than warnings when checking for invalid or incompatible table options. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_strict_mode). + */ + innodbStrictMode?: boolean; + /** + * Makes InnoDB write information about all lock wait timeout errors into the log file. + * + * For details, see [Percona documentation for the variable](https://docs.percona.com/percona-server/8.0/diagnostics/innodb_show_status.html?highlight=innodb_print_lock_wait_timeout_info).
+ */ + innodbPrintLockWaitTimeoutInfo?: boolean; + /** + * System variable specifies the verbosity for handling events intended for the error log + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_log_error_verbosity). + */ + logErrorVerbosity?: number; + /** + * The maximum number of bytes of memory reserved per session for computation of normalized statement digests. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_digest_length). + */ + maxDigestLength?: number; + /** + * This variable specifies the timeout in seconds for attempts to acquire metadata locks + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_lock_wait_timeout). + */ + lockWaitTimeout?: number; + /** + * This variable limits the total number of prepared statements in the server. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_prepared_stmt_count). + */ + maxPreparedStmtCount?: number; + /** + * The system variable enables control over optimizer behavior. 
+ * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_optimizer_switch) + * https://dev.mysql.com/doc/refman/8.0/en/switchable-optimizations.html + */ + optimizerSwitch: string; + /** + * The maximum depth of search performed by the query optimizer + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html) + */ + optimizerSearchDepth?: number; } export enum Mysqlconfig80_SQLMode { @@ -958,6 +1019,7 @@ const baseMysqlconfig80: object = { logSlowRateType: 0, logSlowFilter: 0, binlogTransactionDependencyTracking: 0, + optimizerSwitch: "", }; export const Mysqlconfig80 = { @@ -1502,6 +1564,87 @@ export const Mysqlconfig80 = { if (message.binlogTransactionDependencyTracking !== 0) { writer.uint32(568).int32(message.binlogTransactionDependencyTracking); } + if (message.autocommit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.autocommit! 
}, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.innodbStatusOutput !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbStatusOutput!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.innodbStrictMode !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbStrictMode!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.innodbPrintLockWaitTimeoutInfo !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.innodbPrintLockWaitTimeoutInfo!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logErrorVerbosity!, + }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.maxDigestLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDigestLength!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.lockWaitTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lockWaitTimeout!, + }, + writer.uint32(626).fork() + ).ldelim(); + } + if (message.maxPreparedStmtCount !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedStmtCount!, + }, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.optimizerSwitch !== "") { + writer.uint32(642).string(message.optimizerSwitch); + } + if (message.optimizerSearchDepth !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.optimizerSearchDepth!, + }, + writer.uint32(650).fork() + ).ldelim(); + } return writer; }, @@ -1912,6 +2055,60 @@ export const Mysqlconfig80 = { case 71: message.binlogTransactionDependencyTracking = reader.int32() as any; break; + case 72: + message.autocommit = BoolValue.decode(reader, 
reader.uint32()).value; + break; + case 73: + message.innodbStatusOutput = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.innodbStrictMode = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.innodbPrintLockWaitTimeoutInfo = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.logErrorVerbosity = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.maxDigestLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 78: + message.lockWaitTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.maxPreparedStmtCount = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.optimizerSwitch = reader.string(); + break; + case 81: + message.optimizerSearchDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2251,6 +2448,51 @@ export const Mysqlconfig80 = { object.binlogTransactionDependencyTracking ) : 0; + message.autocommit = + object.autocommit !== undefined && object.autocommit !== null + ? Boolean(object.autocommit) + : undefined; + message.innodbStatusOutput = + object.innodbStatusOutput !== undefined && + object.innodbStatusOutput !== null + ? Boolean(object.innodbStatusOutput) + : undefined; + message.innodbStrictMode = + object.innodbStrictMode !== undefined && object.innodbStrictMode !== null + ? Boolean(object.innodbStrictMode) + : undefined; + message.innodbPrintLockWaitTimeoutInfo = + object.innodbPrintLockWaitTimeoutInfo !== undefined && + object.innodbPrintLockWaitTimeoutInfo !== null + ? Boolean(object.innodbPrintLockWaitTimeoutInfo) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? 
Number(object.logErrorVerbosity) + : undefined; + message.maxDigestLength = + object.maxDigestLength !== undefined && object.maxDigestLength !== null + ? Number(object.maxDigestLength) + : undefined; + message.lockWaitTimeout = + object.lockWaitTimeout !== undefined && object.lockWaitTimeout !== null + ? Number(object.lockWaitTimeout) + : undefined; + message.maxPreparedStmtCount = + object.maxPreparedStmtCount !== undefined && + object.maxPreparedStmtCount !== null + ? Number(object.maxPreparedStmtCount) + : undefined; + message.optimizerSwitch = + object.optimizerSwitch !== undefined && object.optimizerSwitch !== null + ? String(object.optimizerSwitch) + : ""; + message.optimizerSearchDepth = + object.optimizerSearchDepth !== undefined && + object.optimizerSearchDepth !== null + ? Number(object.optimizerSearchDepth) + : undefined; return message; }, @@ -2417,6 +2659,26 @@ export const Mysqlconfig80 = { mysqlconfig80_BinlogTransactionDependencyTrackingToJSON( message.binlogTransactionDependencyTracking )); + message.autocommit !== undefined && (obj.autocommit = message.autocommit); + message.innodbStatusOutput !== undefined && + (obj.innodbStatusOutput = message.innodbStatusOutput); + message.innodbStrictMode !== undefined && + (obj.innodbStrictMode = message.innodbStrictMode); + message.innodbPrintLockWaitTimeoutInfo !== undefined && + (obj.innodbPrintLockWaitTimeoutInfo = + message.innodbPrintLockWaitTimeoutInfo); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = message.logErrorVerbosity); + message.maxDigestLength !== undefined && + (obj.maxDigestLength = message.maxDigestLength); + message.lockWaitTimeout !== undefined && + (obj.lockWaitTimeout = message.lockWaitTimeout); + message.maxPreparedStmtCount !== undefined && + (obj.maxPreparedStmtCount = message.maxPreparedStmtCount); + message.optimizerSwitch !== undefined && + (obj.optimizerSwitch = message.optimizerSwitch); + message.optimizerSearchDepth !== undefined && + 
(obj.optimizerSearchDepth = message.optimizerSearchDepth); return obj; }, @@ -2513,6 +2775,17 @@ export const Mysqlconfig80 = { message.innodbCompressionLevel = object.innodbCompressionLevel ?? undefined; message.binlogTransactionDependencyTracking = object.binlogTransactionDependencyTracking ?? 0; + message.autocommit = object.autocommit ?? undefined; + message.innodbStatusOutput = object.innodbStatusOutput ?? undefined; + message.innodbStrictMode = object.innodbStrictMode ?? undefined; + message.innodbPrintLockWaitTimeoutInfo = + object.innodbPrintLockWaitTimeoutInfo ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? undefined; + message.maxDigestLength = object.maxDigestLength ?? undefined; + message.lockWaitTimeout = object.lockWaitTimeout ?? undefined; + message.maxPreparedStmtCount = object.maxPreparedStmtCount ?? undefined; + message.optimizerSwitch = object.optimizerSwitch ?? ""; + message.optimizerSearchDepth = object.optimizerSearchDepth ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts index 54827507..f3099055 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts @@ -22,6 +22,17 @@ export enum GlobalPermission { * You can always see your own threads. The `PROCESS` privilege also enables use of `SHOW ENGINE`. */ PROCESS = 3, + /** FLUSH_OPTIMIZER_COSTS - Enables use of the `FLUSH OPTIMIZER_COSTS` statement. */ + FLUSH_OPTIMIZER_COSTS = 4, + /** + * SHOW_ROUTINE - Enables a user to access definitions and properties of all stored routines (stored procedures and functions), even those for which the user is not named as the routine DEFINER. + * This access includes: + * The contents of the Information Schema `ROUTINES` table. + * The `SHOW CREATE FUNCTION` and `SHOW CREATE PROCEDURE` statements. + * The `SHOW FUNCTION CODE` and `SHOW PROCEDURE CODE` statements. 
+ * The `SHOW FUNCTION STATUS` and `SHOW PROCEDURE STATUS` statements. + */ + SHOW_ROUTINE = 5, UNRECOGNIZED = -1, } @@ -39,6 +50,12 @@ export function globalPermissionFromJSON(object: any): GlobalPermission { case 3: case "PROCESS": return GlobalPermission.PROCESS; + case 4: + case "FLUSH_OPTIMIZER_COSTS": + return GlobalPermission.FLUSH_OPTIMIZER_COSTS; + case 5: + case "SHOW_ROUTINE": + return GlobalPermission.SHOW_ROUTINE; case -1: case "UNRECOGNIZED": default: @@ -56,6 +73,10 @@ export function globalPermissionToJSON(object: GlobalPermission): string { return "REPLICATION_SLAVE"; case GlobalPermission.PROCESS: return "PROCESS"; + case GlobalPermission.FLUSH_OPTIMIZER_COSTS: + return "FLUSH_OPTIMIZER_COSTS"; + case GlobalPermission.SHOW_ROUTINE: + return "SHOW_ROUTINE"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/auth.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/auth.ts new file mode 100644 index 00000000..5a092114 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/auth.ts @@ -0,0 +1,307 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +export interface AuthSettings { + $type: "yandex.cloud.mdb.opensearch.v1.AuthSettings"; + /** SAML settings */ + saml?: SAMLSettings; +} + +export interface SAMLSettings { + $type: "yandex.cloud.mdb.opensearch.v1.SAMLSettings"; + enabled: boolean; + /** Required. The entity ID of your IdP. */ + idpEntityId: string; + /** Required. The SAML 2.0 metadata file of your IdP. */ + idpMetadataFile: Buffer; + /** Required. The entity ID of the service provider. */ + spEntityId: string; + /** Required. The OpenSearch Dashboards base URL. */ + dashboardsUrl: string; + /** Optional. The attribute in the SAML response where the roles are stored. If not configured, no roles are used. 
*/ + rolesKey: string; + /** Optional. The attribute in the SAML response where the subject is stored. If not configured, the NameID attribute is used. */ + subjectKey: string; +} + +const baseAuthSettings: object = { + $type: "yandex.cloud.mdb.opensearch.v1.AuthSettings", +}; + +export const AuthSettings = { + $type: "yandex.cloud.mdb.opensearch.v1.AuthSettings" as const, + + encode( + message: AuthSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.saml !== undefined) { + SAMLSettings.encode(message.saml, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AuthSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAuthSettings } as AuthSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.saml = SAMLSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AuthSettings { + const message = { ...baseAuthSettings } as AuthSettings; + message.saml = + object.saml !== undefined && object.saml !== null + ? SAMLSettings.fromJSON(object.saml) + : undefined; + return message; + }, + + toJSON(message: AuthSettings): unknown { + const obj: any = {}; + message.saml !== undefined && + (obj.saml = message.saml ? SAMLSettings.toJSON(message.saml) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AuthSettings { + const message = { ...baseAuthSettings } as AuthSettings; + message.saml = + object.saml !== undefined && object.saml !== null + ? 
SAMLSettings.fromPartial(object.saml) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(AuthSettings.$type, AuthSettings); + +const baseSAMLSettings: object = { + $type: "yandex.cloud.mdb.opensearch.v1.SAMLSettings", + enabled: false, + idpEntityId: "", + spEntityId: "", + dashboardsUrl: "", + rolesKey: "", + subjectKey: "", +}; + +export const SAMLSettings = { + $type: "yandex.cloud.mdb.opensearch.v1.SAMLSettings" as const, + + encode( + message: SAMLSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enabled === true) { + writer.uint32(8).bool(message.enabled); + } + if (message.idpEntityId !== "") { + writer.uint32(18).string(message.idpEntityId); + } + if (message.idpMetadataFile.length !== 0) { + writer.uint32(26).bytes(message.idpMetadataFile); + } + if (message.spEntityId !== "") { + writer.uint32(34).string(message.spEntityId); + } + if (message.dashboardsUrl !== "") { + writer.uint32(42).string(message.dashboardsUrl); + } + if (message.rolesKey !== "") { + writer.uint32(50).string(message.rolesKey); + } + if (message.subjectKey !== "") { + writer.uint32(58).string(message.subjectKey); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SAMLSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSAMLSettings } as SAMLSettings; + message.idpMetadataFile = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enabled = reader.bool(); + break; + case 2: + message.idpEntityId = reader.string(); + break; + case 3: + message.idpMetadataFile = reader.bytes() as Buffer; + break; + case 4: + message.spEntityId = reader.string(); + break; + case 5: + message.dashboardsUrl = reader.string(); + break; + case 6: + message.rolesKey = reader.string(); + break; + case 7: + message.subjectKey = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SAMLSettings { + const message = { ...baseSAMLSettings } as SAMLSettings; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.idpEntityId = + object.idpEntityId !== undefined && object.idpEntityId !== null + ? String(object.idpEntityId) + : ""; + message.idpMetadataFile = + object.idpMetadataFile !== undefined && object.idpMetadataFile !== null + ? Buffer.from(bytesFromBase64(object.idpMetadataFile)) + : Buffer.alloc(0); + message.spEntityId = + object.spEntityId !== undefined && object.spEntityId !== null + ? String(object.spEntityId) + : ""; + message.dashboardsUrl = + object.dashboardsUrl !== undefined && object.dashboardsUrl !== null + ? String(object.dashboardsUrl) + : ""; + message.rolesKey = + object.rolesKey !== undefined && object.rolesKey !== null + ? String(object.rolesKey) + : ""; + message.subjectKey = + object.subjectKey !== undefined && object.subjectKey !== null + ? 
String(object.subjectKey) + : ""; + return message; + }, + + toJSON(message: SAMLSettings): unknown { + const obj: any = {}; + message.enabled !== undefined && (obj.enabled = message.enabled); + message.idpEntityId !== undefined && + (obj.idpEntityId = message.idpEntityId); + message.idpMetadataFile !== undefined && + (obj.idpMetadataFile = base64FromBytes( + message.idpMetadataFile !== undefined + ? message.idpMetadataFile + : Buffer.alloc(0) + )); + message.spEntityId !== undefined && (obj.spEntityId = message.spEntityId); + message.dashboardsUrl !== undefined && + (obj.dashboardsUrl = message.dashboardsUrl); + message.rolesKey !== undefined && (obj.rolesKey = message.rolesKey); + message.subjectKey !== undefined && (obj.subjectKey = message.subjectKey); + return obj; + }, + + fromPartial, I>>( + object: I + ): SAMLSettings { + const message = { ...baseSAMLSettings } as SAMLSettings; + message.enabled = object.enabled ?? false; + message.idpEntityId = object.idpEntityId ?? ""; + message.idpMetadataFile = object.idpMetadataFile ?? Buffer.alloc(0); + message.spEntityId = object.spEntityId ?? ""; + message.dashboardsUrl = object.dashboardsUrl ?? ""; + message.rolesKey = object.rolesKey ?? ""; + message.subjectKey = object.subjectKey ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(SAMLSettings.$type, SAMLSettings); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/backup.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/backup.ts new file mode 100644 index 00000000..c9f216d2 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/backup.ts @@ -0,0 +1,280 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +export interface Backup { + $type: "yandex.cloud.mdb.opensearch.v1.Backup"; + /** Required. ID of the backup. */ + id: string; + /** ID of the folder that the backup belongs to. */ + folderId: string; + /** ID of the OpenSearch cluster that the backup was created for. */ + sourceClusterId: string; + /** Time when the backup operation was started. */ + startedAt?: Date; + /** Time when the backup operation was completed. */ + createdAt?: Date; + /** Names of indices in the backup. */ + indices: string[]; + /** OpenSearch version used to create the backup. */ + opensearchVersion: string; + /** Size of the backup in bytes. */ + sizeBytes: number; + /** The number of indices in the backup. 
*/ + indicesTotal: number; +} + +const baseBackup: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Backup", + id: "", + folderId: "", + sourceClusterId: "", + indices: "", + opensearchVersion: "", + sizeBytes: 0, + indicesTotal: 0, +}; + +export const Backup = { + $type: "yandex.cloud.mdb.opensearch.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.sourceClusterId !== "") { + writer.uint32(26).string(message.sourceClusterId); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(42).fork() + ).ldelim(); + } + for (const v of message.indices) { + writer.uint32(50).string(v!); + } + if (message.opensearchVersion !== "") { + writer.uint32(58).string(message.opensearchVersion); + } + if (message.sizeBytes !== 0) { + writer.uint32(64).int64(message.sizeBytes); + } + if (message.indicesTotal !== 0) { + writer.uint32(72).int64(message.indicesTotal); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + message.indices = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.sourceClusterId = reader.string(); + break; + case 4: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.indices.push(reader.string()); + break; + case 7: + message.opensearchVersion = reader.string(); + break; + case 8: + message.sizeBytes = longToNumber(reader.int64() as Long); + break; + case 9: + message.indicesTotal = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.sourceClusterId = + object.sourceClusterId !== undefined && object.sourceClusterId !== null + ? String(object.sourceClusterId) + : ""; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.indices = (object.indices ?? []).map((e: any) => String(e)); + message.opensearchVersion = + object.opensearchVersion !== undefined && + object.opensearchVersion !== null + ? String(object.opensearchVersion) + : ""; + message.sizeBytes = + object.sizeBytes !== undefined && object.sizeBytes !== null + ? 
Number(object.sizeBytes) + : 0; + message.indicesTotal = + object.indicesTotal !== undefined && object.indicesTotal !== null + ? Number(object.indicesTotal) + : 0; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.sourceClusterId !== undefined && + (obj.sourceClusterId = message.sourceClusterId); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + if (message.indices) { + obj.indices = message.indices.map((e) => e); + } else { + obj.indices = []; + } + message.opensearchVersion !== undefined && + (obj.opensearchVersion = message.opensearchVersion); + message.sizeBytes !== undefined && + (obj.sizeBytes = Math.round(message.sizeBytes)); + message.indicesTotal !== undefined && + (obj.indicesTotal = Math.round(message.indicesTotal)); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.sourceClusterId = object.sourceClusterId ?? ""; + message.startedAt = object.startedAt ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.indices = object.indices?.map((e) => e) || []; + message.opensearchVersion = object.opensearchVersion ?? ""; + message.sizeBytes = object.sizeBytes ?? 0; + message.indicesTotal = object.indicesTotal ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/backup_service.ts new file mode 100644 index 
00000000..97ad08d8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/backup_service.ts @@ -0,0 +1,434 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Backup } from "../../../../../yandex/cloud/mdb/opensearch/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +export interface GetBackupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.GetBackupRequest"; + /** ID of the backup to return. */ + backupId: string; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsRequest"; + /** ID of the folder to list backups in. */ + folderId: string; + /** + * The maximum number of results per page that should be returned. + * + * If the number of available results is larger than [page_size], the service returns + * a [ListBackupsResponse.next_page_token] that can be used to get the next page of results + * in subsequent list requests. + * + * Default value is 100. + */ + pageSize: number; + /** + * The page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] + * returned by the previous list request. + */ + pageToken: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for a list request. + * + * If the number of results is larger than [ListBackupsRequest.page_size] specified in the request, + * use the [next_page_token] as the value for the [ListBackupsRequest.page_token] parameter in the next list request. 
+ * + * Each subsequent ListBackups request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.GetBackupRequest", + backupId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsResponse", + nextPageToken: "", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +/** A set of methods for managing backups. */ +export const BackupServiceService = { + /** Returns the specified backup of an OpenSearch cluster. 
*/ + get: { + path: "/yandex.cloud.mdb.opensearch.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** Returns the list of available backups for the specified OpenSearch cluster. */ + list: { + path: "/yandex.cloud.mdb.opensearch.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** Returns the specified backup of an OpenSearch cluster. */ + get: handleUnaryCall; + /** Returns the list of available backups for the specified OpenSearch cluster. */ + list: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** Returns the specified backup of an OpenSearch cluster. */ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified OpenSearch cluster. 
*/ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.mdb.opensearch.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/cluster.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/cluster.ts new file mode 100644 index 00000000..040946ae --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/cluster.ts @@ -0,0 +1,2454 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + MaintenanceWindow, + MaintenanceOperation, +} from "../../../../../yandex/cloud/mdb/opensearch/v1/maintenance"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; +import { OpenSearchConfigSet2 } from "../../../../../yandex/cloud/mdb/opensearch/v1/config/opensearch"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +/** An OpenSearch cluster resource. */ +export interface Cluster { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster"; + /** + * ID of the OpenSearch cluster. + * This ID is assigned by the platform at the moment of cluster creation. + */ + id: string; + /** ID of the folder that the OpenSearch cluster belongs to. */ + folderId: string; + /** Time when the cluster was created. */ + createdAt?: Date; + /** + * Name of the OpenSearch cluster. + * The name is unique within the folder. 1-63 characters long. + */ + name: string; + /** Description of the OpenSearch cluster. 0-256 characters long. */ + description: string; + /** + * Custom labels for the OpenSearch cluster as `key:value` pairs. + * Maximum 64 labels per resource. + */ + labels: { [key: string]: string }; + /** Deployment environment of the OpenSearch cluster. 
*/ + environment: Cluster_Environment; + /** Description of monitoring systems relevant to the OpenSearch cluster. */ + monitoring: Monitoring[]; + /** Configuration of the OpenSearch cluster. */ + config?: ClusterConfig; + /** ID of the cloud network that the cluster belongs to. */ + networkId: string; + /** Aggregated cluster health. */ + health: Cluster_Health; + /** Current state of the cluster. */ + status: Cluster_Status; + /** User security groups. */ + securityGroupIds: string[]; + /** ID of the service account used to access Object Storage. */ + serviceAccountId: string; + /** Determines whether the cluster is protected from being deleted. */ + deletionProtection: boolean; + /** Cluster maintenance window. Should be defined by either one of the two options. */ + maintenanceWindow?: MaintenanceWindow; + /** Maintenance operation planned at nearest [maintenance_window]. */ + plannedOperation?: MaintenanceOperation; +} + +export enum Cluster_Environment { + ENVIRONMENT_UNSPECIFIED = 0, + /** + * PRODUCTION - Stable environment with a conservative update policy: + * only hotfixes are applied during regular maintenance. + */ + PRODUCTION = 1, + /** + * PRESTABLE - Environment with more aggressive update policy: new versions + * are rolled out irrespective of backward compatibility. 
+ */ + PRESTABLE = 2, + UNRECOGNIZED = -1, +} + +export function cluster_EnvironmentFromJSON(object: any): Cluster_Environment { + switch (object) { + case 0: + case "ENVIRONMENT_UNSPECIFIED": + return Cluster_Environment.ENVIRONMENT_UNSPECIFIED; + case 1: + case "PRODUCTION": + return Cluster_Environment.PRODUCTION; + case 2: + case "PRESTABLE": + return Cluster_Environment.PRESTABLE; + case -1: + case "UNRECOGNIZED": + default: + return Cluster_Environment.UNRECOGNIZED; + } +} + +export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { + switch (object) { + case Cluster_Environment.ENVIRONMENT_UNSPECIFIED: + return "ENVIRONMENT_UNSPECIFIED"; + case Cluster_Environment.PRODUCTION: + return "PRODUCTION"; + case Cluster_Environment.PRESTABLE: + return "PRESTABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Cluster_Health { + /** HEALTH_UNKNOWN - Health of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + HEALTH_UNKNOWN = 0, + /** ALIVE - Cluster is working normally ([Host.health] for every host in the cluster is ALIVE). */ + ALIVE = 1, + /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ + DEAD = 2, + /** DEGRADED - Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). 
*/ + DEGRADED = 3, + UNRECOGNIZED = -1, +} + +export function cluster_HealthFromJSON(object: any): Cluster_Health { + switch (object) { + case 0: + case "HEALTH_UNKNOWN": + return Cluster_Health.HEALTH_UNKNOWN; + case 1: + case "ALIVE": + return Cluster_Health.ALIVE; + case 2: + case "DEAD": + return Cluster_Health.DEAD; + case 3: + case "DEGRADED": + return Cluster_Health.DEGRADED; + case -1: + case "UNRECOGNIZED": + default: + return Cluster_Health.UNRECOGNIZED; + } +} + +export function cluster_HealthToJSON(object: Cluster_Health): string { + switch (object) { + case Cluster_Health.HEALTH_UNKNOWN: + return "HEALTH_UNKNOWN"; + case Cluster_Health.ALIVE: + return "ALIVE"; + case Cluster_Health.DEAD: + return "DEAD"; + case Cluster_Health.DEGRADED: + return "DEGRADED"; + default: + return "UNKNOWN"; + } +} + +/** Current state of the cluster. */ +export enum Cluster_Status { + /** STATUS_UNKNOWN - Cluster state is unknown. */ + STATUS_UNKNOWN = 0, + /** CREATING - Cluster is being created. */ + CREATING = 1, + /** RUNNING - Cluster is running normally. */ + RUNNING = 2, + /** ERROR - Cluster has encountered a problem and cannot operate. */ + ERROR = 3, + /** UPDATING - Cluster is being updated. */ + UPDATING = 4, + /** STOPPING - Cluster is stopping. */ + STOPPING = 5, + /** STOPPED - Cluster has stopped. */ + STOPPED = 6, + /** STARTING - Cluster is starting. 
*/ + STARTING = 7, + UNRECOGNIZED = -1, +} + +export function cluster_StatusFromJSON(object: any): Cluster_Status { + switch (object) { + case 0: + case "STATUS_UNKNOWN": + return Cluster_Status.STATUS_UNKNOWN; + case 1: + case "CREATING": + return Cluster_Status.CREATING; + case 2: + case "RUNNING": + return Cluster_Status.RUNNING; + case 3: + case "ERROR": + return Cluster_Status.ERROR; + case 4: + case "UPDATING": + return Cluster_Status.UPDATING; + case 5: + case "STOPPING": + return Cluster_Status.STOPPING; + case 6: + case "STOPPED": + return Cluster_Status.STOPPED; + case 7: + case "STARTING": + return Cluster_Status.STARTING; + case -1: + case "UNRECOGNIZED": + default: + return Cluster_Status.UNRECOGNIZED; + } +} + +export function cluster_StatusToJSON(object: Cluster_Status): string { + switch (object) { + case Cluster_Status.STATUS_UNKNOWN: + return "STATUS_UNKNOWN"; + case Cluster_Status.CREATING: + return "CREATING"; + case Cluster_Status.RUNNING: + return "RUNNING"; + case Cluster_Status.ERROR: + return "ERROR"; + case Cluster_Status.UPDATING: + return "UPDATING"; + case Cluster_Status.STOPPING: + return "STOPPING"; + case Cluster_Status.STOPPED: + return "STOPPED"; + case Cluster_Status.STARTING: + return "STARTING"; + default: + return "UNKNOWN"; + } +} + +export interface Cluster_LabelsEntry { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster.LabelsEntry"; + key: string; + value: string; +} + +/** Monitoring system metadata. */ +export interface Monitoring { + $type: "yandex.cloud.mdb.opensearch.v1.Monitoring"; + /** Name of the monitoring system. */ + name: string; + /** Description of the monitoring system. */ + description: string; + /** Link to the monitoring system charts for the OpenSearch cluster. */ + link: string; +} + +/** The OpenSearch cluster configuration. */ +export interface ClusterConfig { + $type: "yandex.cloud.mdb.opensearch.v1.ClusterConfig"; + /** Version of the OpenSearch server software. 
*/ + version: string; + /** OpenSearch configuration. */ + opensearch?: OpenSearch; + /** Dashboards configuration. */ + dashboards?: Dashboards; + /** Access policy for external services. */ + access?: Access; +} + +/** The OpenSearch host group type configuration. */ +export interface OpenSearch { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch"; + /** Names of the cluster plugins. */ + plugins: string[]; + /** Host groups of the OpenSearch type. */ + nodeGroups: OpenSearch_NodeGroup[]; + opensearchConfigSet2?: OpenSearchConfigSet2 | undefined; +} + +export enum OpenSearch_GroupRole { + GROUP_ROLE_UNSPECIFIED = 0, + DATA = 1, + MANAGER = 2, + UNRECOGNIZED = -1, +} + +export function openSearch_GroupRoleFromJSON( + object: any +): OpenSearch_GroupRole { + switch (object) { + case 0: + case "GROUP_ROLE_UNSPECIFIED": + return OpenSearch_GroupRole.GROUP_ROLE_UNSPECIFIED; + case 1: + case "DATA": + return OpenSearch_GroupRole.DATA; + case 2: + case "MANAGER": + return OpenSearch_GroupRole.MANAGER; + case -1: + case "UNRECOGNIZED": + default: + return OpenSearch_GroupRole.UNRECOGNIZED; + } +} + +export function openSearch_GroupRoleToJSON( + object: OpenSearch_GroupRole +): string { + switch (object) { + case OpenSearch_GroupRole.GROUP_ROLE_UNSPECIFIED: + return "GROUP_ROLE_UNSPECIFIED"; + case OpenSearch_GroupRole.DATA: + return "DATA"; + case OpenSearch_GroupRole.MANAGER: + return "MANAGER"; + default: + return "UNKNOWN"; + } +} + +/** Configuration of the host group. */ +export interface OpenSearch_NodeGroup { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch.NodeGroup"; + /** Name of the group. Must be 1-63 characters long. */ + name: string; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. */ + hostsCount: number; + /** IDs of the availability zones the hosts belong to. */ + zoneIds: string[]; + /** IDs of the subnets that the hosts belong to. 
*/ + subnetIds: string[]; + /** Determines whether a public IP is assigned to the hosts in the group. */ + assignPublicIp: boolean; + /** Roles of the host group. */ + roles: OpenSearch_GroupRole[]; +} + +/** The Dashboards host group type configuration. */ +export interface Dashboards { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards"; + /** Host groups of the Dashboards type. */ + nodeGroups: Dashboards_NodeGroup[]; +} + +export interface Dashboards_NodeGroup { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards.NodeGroup"; + /** Name of the group. 1-63 characters long. */ + name: string; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. */ + hostsCount: number; + /** IDs of the availability zones the hosts belong to. */ + zoneIds: string[]; + /** IDs of the subnets that the hosts belong to. */ + subnetIds: string[]; + /** Determines whether a public IP is assigned to the hosts in the group. */ + assignPublicIp: boolean; +} + +/** A list of computational resources allocated to a host. */ +export interface Resources { + $type: "yandex.cloud.mdb.opensearch.v1.Resources"; + /** ID of the preset for computational resources allocated to a host. */ + resourcePresetId: string; + /** Volume of the storage used by the host, in bytes. */ + diskSize: number; + /** Type of the storage used by the host: `network-hdd`, `network-ssd` or `local-ssd`. */ + diskTypeId: string; +} + +/** An OpenSearch cluster host resource. */ +export interface Host { + $type: "yandex.cloud.mdb.opensearch.v1.Host"; + /** + * Required. Name of the OpenSearch host. + * + * The host name is assigned by the platform at creation time and cannot be changed. + * + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. + */ + name: string; + /** Required. ID of the OpenSearch cluster. The ID is assigned by the platform at creation time. 
*/ + clusterId: string; + /** ID of the availability zone the OpenSearch host belongs to. */ + zoneId: string; + /** Resources allocated to the OpenSearch host. */ + resources?: Resources; + /** Type of the host. */ + type: Host_Type; + /** Status code of the aggregated health of the host. */ + health: Host_Health; + /** ID of the subnet that the host belongs to. */ + subnetId: string; + /** Determines whether a public IP is assigned to the host. */ + assignPublicIp: boolean; + /** Resources used by the host. */ + system?: Host_SystemMetrics; + /** Name of the host group that the host belongs to. */ + nodeGroup: string; + /** Roles of the host. */ + roles: OpenSearch_GroupRole[]; +} + +export enum Host_Health { + /** UNKNOWN - Health of the host is unknown. */ + UNKNOWN = 0, + /** ALIVE - The host is performing all its functions normally. */ + ALIVE = 1, + /** DEAD - The host is inoperable and cannot perform any of its essential functions. */ + DEAD = 2, + /** DEGRADED - The host is working below capacity or not fully functional. */ + DEGRADED = 3, + UNRECOGNIZED = -1, +} + +export function host_HealthFromJSON(object: any): Host_Health { + switch (object) { + case 0: + case "UNKNOWN": + return Host_Health.UNKNOWN; + case 1: + case "ALIVE": + return Host_Health.ALIVE; + case 2: + case "DEAD": + return Host_Health.DEAD; + case 3: + case "DEGRADED": + return Host_Health.DEGRADED; + case -1: + case "UNRECOGNIZED": + default: + return Host_Health.UNRECOGNIZED; + } +} + +export function host_HealthToJSON(object: Host_Health): string { + switch (object) { + case Host_Health.UNKNOWN: + return "UNKNOWN"; + case Host_Health.ALIVE: + return "ALIVE"; + case Host_Health.DEAD: + return "DEAD"; + case Host_Health.DEGRADED: + return "DEGRADED"; + default: + return "UNKNOWN"; + } +} + +export enum Host_Type { + /** TYPE_UNSPECIFIED - The type is not specified. */ + TYPE_UNSPECIFIED = 0, + /** OPENSEARCH - An OpenSearch type host. 
*/ + OPENSEARCH = 1, + /** DASHBOARDS - A Dashboards type host. */ + DASHBOARDS = 2, + UNRECOGNIZED = -1, +} + +export function host_TypeFromJSON(object: any): Host_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return Host_Type.TYPE_UNSPECIFIED; + case 1: + case "OPENSEARCH": + return Host_Type.OPENSEARCH; + case 2: + case "DASHBOARDS": + return Host_Type.DASHBOARDS; + case -1: + case "UNRECOGNIZED": + default: + return Host_Type.UNRECOGNIZED; + } +} + +export function host_TypeToJSON(object: Host_Type): string { + switch (object) { + case Host_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case Host_Type.OPENSEARCH: + return "OPENSEARCH"; + case Host_Type.DASHBOARDS: + return "DASHBOARDS"; + default: + return "UNKNOWN"; + } +} + +/** CPU usage of the host. */ +export interface Host_CPUMetric { + $type: "yandex.cloud.mdb.opensearch.v1.Host.CPUMetric"; + /** Time of the record. */ + timestamp: number; + /** Percentage of the CPU used. */ + used: number; +} + +/** RAM usage of the host. */ +export interface Host_MemoryMetric { + $type: "yandex.cloud.mdb.opensearch.v1.Host.MemoryMetric"; + /** Time of the record. */ + timestamp: number; + /** The amount of RAM used, in bytes. */ + used: number; + /** Total amount of RAM allocated to the host. */ + total: number; +} + +/** Disk usage of the host. */ +export interface Host_DiskMetric { + $type: "yandex.cloud.mdb.opensearch.v1.Host.DiskMetric"; + /** Time of the record. */ + timestamp: number; + /** The amount of disk space used, in bytes. */ + used: number; + /** Total amount of disk space allocated to the host. */ + total: number; +} + +/** Resources used by the host. */ +export interface Host_SystemMetrics { + $type: "yandex.cloud.mdb.opensearch.v1.Host.SystemMetrics"; + /** CPU usage of the host. */ + cpu?: Host_CPUMetric; + /** RAM usage of the host. */ + memory?: Host_MemoryMetric; + /** Disk usage of the host. */ + disk?: Host_DiskMetric; +} + +/** Access policy for external services. 
*/ +export interface Access { + $type: "yandex.cloud.mdb.opensearch.v1.Access"; + /** Determines whether the access to Data Transfer is allowed. */ + dataTransfer: boolean; + /** Determines whether the access to Serverless is allowed. */ + serverless: boolean; +} + +const baseCluster: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster", + id: "", + folderId: "", + name: "", + description: "", + environment: 0, + networkId: "", + health: 0, + status: 0, + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, +}; + +export const Cluster = { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster" as const, + + encode( + message: Cluster, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Cluster_LabelsEntry.encode( + { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(56).int32(message.environment); + } + for (const v of message.monitoring) { + Monitoring.encode(v!, writer.uint32(66).fork()).ldelim(); + } + if (message.config !== undefined) { + ClusterConfig.encode(message.config, writer.uint32(74).fork()).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(82).string(message.networkId); + } + if (message.health !== 0) { + writer.uint32(88).int32(message.health); + } + if (message.status !== 0) { + writer.uint32(96).int32(message.status); + } + for (const v of 
message.securityGroupIds) { + writer.uint32(106).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(114).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + writer.uint32(120).bool(message.deletionProtection); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.plannedOperation !== undefined) { + MaintenanceOperation.encode( + message.plannedOperation, + writer.uint32(138).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Cluster { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCluster } as Cluster; + message.labels = {}; + message.monitoring = []; + message.securityGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = Cluster_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.environment = reader.int32() as any; + break; + case 8: + message.monitoring.push(Monitoring.decode(reader, reader.uint32())); + break; + case 9: + message.config = ClusterConfig.decode(reader, reader.uint32()); + break; + case 10: + message.networkId = reader.string(); + break; + case 11: + message.health = reader.int32() as any; + break; + case 12: + message.status = reader.int32() as any; + break; + case 13: + 
message.securityGroupIds.push(reader.string()); + break; + case 14: + message.serviceAccountId = reader.string(); + break; + case 15: + message.deletionProtection = reader.bool(); + break; + case 16: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + case 17: + message.plannedOperation = MaintenanceOperation.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Cluster { + const message = { ...baseCluster } as Cluster; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.monitoring = (object.monitoring ?? []).map((e: any) => + Monitoring.fromJSON(e) + ); + message.config = + object.config !== undefined && object.config !== null + ? ClusterConfig.fromJSON(object.config) + : undefined; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.health = + object.health !== undefined && object.health !== null + ? 
cluster_HealthFromJSON(object.health) + : 0; + message.status = + object.status !== undefined && object.status !== null + ? cluster_StatusFromJSON(object.status) + : 0; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + message.plannedOperation = + object.plannedOperation !== undefined && object.plannedOperation !== null + ? MaintenanceOperation.fromJSON(object.plannedOperation) + : undefined; + return message; + }, + + toJSON(message: Cluster): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + if (message.monitoring) { + obj.monitoring = message.monitoring.map((e) => + e ? Monitoring.toJSON(e) : undefined + ); + } else { + obj.monitoring = []; + } + message.config !== undefined && + (obj.config = message.config + ? 
ClusterConfig.toJSON(message.config) + : undefined); + message.networkId !== undefined && (obj.networkId = message.networkId); + message.health !== undefined && + (obj.health = cluster_HealthToJSON(message.health)); + message.status !== undefined && + (obj.status = cluster_StatusToJSON(message.status)); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + message.plannedOperation !== undefined && + (obj.plannedOperation = message.plannedOperation + ? MaintenanceOperation.toJSON(message.plannedOperation) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Cluster { + const message = { ...baseCluster } as Cluster; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.monitoring = + object.monitoring?.map((e) => Monitoring.fromPartial(e)) || []; + message.config = + object.config !== undefined && object.config !== null + ? ClusterConfig.fromPartial(object.config) + : undefined; + message.networkId = object.networkId ?? ""; + message.health = object.health ?? 0; + message.status = object.status ?? 
0; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + message.plannedOperation = + object.plannedOperation !== undefined && object.plannedOperation !== null + ? MaintenanceOperation.fromPartial(object.plannedOperation) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Cluster.$type, Cluster); + +const baseCluster_LabelsEntry: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster.LabelsEntry", + key: "", + value: "", +}; + +export const Cluster_LabelsEntry = { + $type: "yandex.cloud.mdb.opensearch.v1.Cluster.LabelsEntry" as const, + + encode( + message: Cluster_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Cluster_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCluster_LabelsEntry } as Cluster_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Cluster_LabelsEntry { + const message = { ...baseCluster_LabelsEntry } as Cluster_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? 
String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Cluster_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Cluster_LabelsEntry { + const message = { ...baseCluster_LabelsEntry } as Cluster_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Cluster_LabelsEntry.$type, Cluster_LabelsEntry); + +const baseMonitoring: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Monitoring", + name: "", + description: "", + link: "", +}; + +export const Monitoring = { + $type: "yandex.cloud.mdb.opensearch.v1.Monitoring" as const, + + encode( + message: Monitoring, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + if (message.link !== "") { + writer.uint32(26).string(message.link); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Monitoring { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMonitoring } as Monitoring; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + case 3: + message.link = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Monitoring { + const message = { ...baseMonitoring } as Monitoring; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.link = + object.link !== undefined && object.link !== null + ? String(object.link) + : ""; + return message; + }, + + toJSON(message: Monitoring): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + message.link !== undefined && (obj.link = message.link); + return obj; + }, + + fromPartial, I>>( + object: I + ): Monitoring { + const message = { ...baseMonitoring } as Monitoring; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.link = object.link ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Monitoring.$type, Monitoring); + +const baseClusterConfig: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ClusterConfig", + version: "", +}; + +export const ClusterConfig = { + $type: "yandex.cloud.mdb.opensearch.v1.ClusterConfig" as const, + + encode( + message: ClusterConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.opensearch !== undefined) { + OpenSearch.encode(message.opensearch, writer.uint32(18).fork()).ldelim(); + } + if (message.dashboards !== undefined) { + Dashboards.encode(message.dashboards, writer.uint32(26).fork()).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClusterConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClusterConfig } as ClusterConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 2: + message.opensearch = OpenSearch.decode(reader, reader.uint32()); + break; + case 3: + message.dashboards = Dashboards.decode(reader, reader.uint32()); + break; + case 4: + message.access = Access.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClusterConfig { + const message = { ...baseClusterConfig } as ClusterConfig; + message.version = + object.version !== undefined && object.version !== null + ? String(object.version) + : ""; + message.opensearch = + object.opensearch !== undefined && object.opensearch !== null + ? 
OpenSearch.fromJSON(object.opensearch) + : undefined; + message.dashboards = + object.dashboards !== undefined && object.dashboards !== null + ? Dashboards.fromJSON(object.dashboards) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + return message; + }, + + toJSON(message: ClusterConfig): unknown { + const obj: any = {}; + message.version !== undefined && (obj.version = message.version); + message.opensearch !== undefined && + (obj.opensearch = message.opensearch + ? OpenSearch.toJSON(message.opensearch) + : undefined); + message.dashboards !== undefined && + (obj.dashboards = message.dashboards + ? Dashboards.toJSON(message.dashboards) + : undefined); + message.access !== undefined && + (obj.access = message.access ? Access.toJSON(message.access) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClusterConfig { + const message = { ...baseClusterConfig } as ClusterConfig; + message.version = object.version ?? ""; + message.opensearch = + object.opensearch !== undefined && object.opensearch !== null + ? OpenSearch.fromPartial(object.opensearch) + : undefined; + message.dashboards = + object.dashboards !== undefined && object.dashboards !== null + ? Dashboards.fromPartial(object.dashboards) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? 
Access.fromPartial(object.access) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ClusterConfig.$type, ClusterConfig); + +const baseOpenSearch: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch", + plugins: "", +}; + +export const OpenSearch = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch" as const, + + encode( + message: OpenSearch, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.plugins) { + writer.uint32(10).string(v!); + } + for (const v of message.nodeGroups) { + OpenSearch_NodeGroup.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.opensearchConfigSet2 !== undefined) { + OpenSearchConfigSet2.encode( + message.opensearchConfigSet2, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OpenSearch { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenSearch } as OpenSearch; + message.plugins = []; + message.nodeGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plugins.push(reader.string()); + break; + case 2: + message.nodeGroups.push( + OpenSearch_NodeGroup.decode(reader, reader.uint32()) + ); + break; + case 3: + message.opensearchConfigSet2 = OpenSearchConfigSet2.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearch { + const message = { ...baseOpenSearch } as OpenSearch; + message.plugins = (object.plugins ?? []).map((e: any) => String(e)); + message.nodeGroups = (object.nodeGroups ?? []).map((e: any) => + OpenSearch_NodeGroup.fromJSON(e) + ); + message.opensearchConfigSet2 = + object.opensearchConfigSet_2 !== undefined && + object.opensearchConfigSet_2 !== null + ? 
OpenSearchConfigSet2.fromJSON(object.opensearchConfigSet_2) + : undefined; + return message; + }, + + toJSON(message: OpenSearch): unknown { + const obj: any = {}; + if (message.plugins) { + obj.plugins = message.plugins.map((e) => e); + } else { + obj.plugins = []; + } + if (message.nodeGroups) { + obj.nodeGroups = message.nodeGroups.map((e) => + e ? OpenSearch_NodeGroup.toJSON(e) : undefined + ); + } else { + obj.nodeGroups = []; + } + message.opensearchConfigSet2 !== undefined && + (obj.opensearchConfigSet_2 = message.opensearchConfigSet2 + ? OpenSearchConfigSet2.toJSON(message.opensearchConfigSet2) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearch { + const message = { ...baseOpenSearch } as OpenSearch; + message.plugins = object.plugins?.map((e) => e) || []; + message.nodeGroups = + object.nodeGroups?.map((e) => OpenSearch_NodeGroup.fromPartial(e)) || []; + message.opensearchConfigSet2 = + object.opensearchConfigSet2 !== undefined && + object.opensearchConfigSet2 !== null + ? 
OpenSearchConfigSet2.fromPartial(object.opensearchConfigSet2) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(OpenSearch.$type, OpenSearch); + +const baseOpenSearch_NodeGroup: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch.NodeGroup", + name: "", + hostsCount: 0, + zoneIds: "", + subnetIds: "", + assignPublicIp: false, + roles: 0, +}; + +export const OpenSearch_NodeGroup = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearch.NodeGroup" as const, + + encode( + message: OpenSearch_NodeGroup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(24).int64(message.hostsCount); + } + for (const v of message.zoneIds) { + writer.uint32(34).string(v!); + } + for (const v of message.subnetIds) { + writer.uint32(42).string(v!); + } + if (message.assignPublicIp === true) { + writer.uint32(48).bool(message.assignPublicIp); + } + writer.uint32(58).fork(); + for (const v of message.roles) { + writer.int32(v); + } + writer.ldelim(); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearch_NodeGroup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseOpenSearch_NodeGroup } as OpenSearch_NodeGroup; + message.zoneIds = []; + message.subnetIds = []; + message.roles = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 3: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + case 4: + message.zoneIds.push(reader.string()); + break; + case 5: + message.subnetIds.push(reader.string()); + break; + case 6: + message.assignPublicIp = reader.bool(); + break; + case 7: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.roles.push(reader.int32() as any); + } + } else { + message.roles.push(reader.int32() as any); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearch_NodeGroup { + const message = { ...baseOpenSearch_NodeGroup } as OpenSearch_NodeGroup; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + message.roles = (object.roles ?? 
[]).map((e: any) => + openSearch_GroupRoleFromJSON(e) + ); + return message; + }, + + toJSON(message: OpenSearch_NodeGroup): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + if (message.roles) { + obj.roles = message.roles.map((e) => openSearch_GroupRoleToJSON(e)); + } else { + obj.roles = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearch_NodeGroup { + const message = { ...baseOpenSearch_NodeGroup } as OpenSearch_NodeGroup; + message.name = object.name ?? ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 0; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + message.assignPublicIp = object.assignPublicIp ?? 
false; + message.roles = object.roles?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(OpenSearch_NodeGroup.$type, OpenSearch_NodeGroup); + +const baseDashboards: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards", +}; + +export const Dashboards = { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards" as const, + + encode( + message: Dashboards, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.nodeGroups) { + Dashboards_NodeGroup.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Dashboards { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDashboards } as Dashboards; + message.nodeGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.nodeGroups.push( + Dashboards_NodeGroup.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dashboards { + const message = { ...baseDashboards } as Dashboards; + message.nodeGroups = (object.nodeGroups ?? []).map((e: any) => + Dashboards_NodeGroup.fromJSON(e) + ); + return message; + }, + + toJSON(message: Dashboards): unknown { + const obj: any = {}; + if (message.nodeGroups) { + obj.nodeGroups = message.nodeGroups.map((e) => + e ? 
Dashboards_NodeGroup.toJSON(e) : undefined + ); + } else { + obj.nodeGroups = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Dashboards { + const message = { ...baseDashboards } as Dashboards; + message.nodeGroups = + object.nodeGroups?.map((e) => Dashboards_NodeGroup.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Dashboards.$type, Dashboards); + +const baseDashboards_NodeGroup: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards.NodeGroup", + name: "", + hostsCount: 0, + zoneIds: "", + subnetIds: "", + assignPublicIp: false, +}; + +export const Dashboards_NodeGroup = { + $type: "yandex.cloud.mdb.opensearch.v1.Dashboards.NodeGroup" as const, + + encode( + message: Dashboards_NodeGroup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(24).int64(message.hostsCount); + } + for (const v of message.zoneIds) { + writer.uint32(34).string(v!); + } + for (const v of message.subnetIds) { + writer.uint32(42).string(v!); + } + if (message.assignPublicIp === true) { + writer.uint32(48).bool(message.assignPublicIp); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Dashboards_NodeGroup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDashboards_NodeGroup } as Dashboards_NodeGroup; + message.zoneIds = []; + message.subnetIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 3: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + case 4: + message.zoneIds.push(reader.string()); + break; + case 5: + message.subnetIds.push(reader.string()); + break; + case 6: + message.assignPublicIp = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dashboards_NodeGroup { + const message = { ...baseDashboards_NodeGroup } as Dashboards_NodeGroup; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + return message; + }, + + toJSON(message: Dashboards_NodeGroup): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.resources !== undefined && + (obj.resources = message.resources + ? 
Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): Dashboards_NodeGroup { + const message = { ...baseDashboards_NodeGroup } as Dashboards_NodeGroup; + message.name = object.name ?? ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 0; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + message.assignPublicIp = object.assignPublicIp ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Dashboards_NodeGroup.$type, Dashboards_NodeGroup); + +const baseResources: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Resources", + resourcePresetId: "", + diskSize: 0, + diskTypeId: "", +}; + +export const Resources = { + $type: "yandex.cloud.mdb.opensearch.v1.Resources" as const, + + encode( + message: Resources, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + if (message.diskSize !== 0) { + writer.uint32(16).int64(message.diskSize); + } + if (message.diskTypeId !== "") { + writer.uint32(26).string(message.diskTypeId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Resources { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseResources } as Resources; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + case 2: + message.diskSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.diskTypeId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Resources { + const message = { ...baseResources } as Resources; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + message.diskSize = + object.diskSize !== undefined && object.diskSize !== null + ? Number(object.diskSize) + : 0; + message.diskTypeId = + object.diskTypeId !== undefined && object.diskTypeId !== null + ? String(object.diskTypeId) + : ""; + return message; + }, + + toJSON(message: Resources): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + message.diskSize !== undefined && + (obj.diskSize = Math.round(message.diskSize)); + message.diskTypeId !== undefined && (obj.diskTypeId = message.diskTypeId); + return obj; + }, + + fromPartial, I>>( + object: I + ): Resources { + const message = { ...baseResources } as Resources; + message.resourcePresetId = object.resourcePresetId ?? ""; + message.diskSize = object.diskSize ?? 0; + message.diskTypeId = object.diskTypeId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Resources.$type, Resources); + +const baseHost: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Host", + name: "", + clusterId: "", + zoneId: "", + type: 0, + health: 0, + subnetId: "", + assignPublicIp: false, + nodeGroup: "", + roles: 0, +}; + +export const Host = { + $type: "yandex.cloud.mdb.opensearch.v1.Host" as const, + + encode(message: Host, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + if (message.zoneId !== "") { + writer.uint32(26).string(message.zoneId); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(34).fork()).ldelim(); + } + if (message.type !== 0) { + writer.uint32(40).int32(message.type); + } + if (message.health !== 0) { + writer.uint32(48).int32(message.health); + } + if (message.subnetId !== "") { + writer.uint32(66).string(message.subnetId); + } + if (message.assignPublicIp === true) { + writer.uint32(72).bool(message.assignPublicIp); + } + if (message.system !== undefined) { + Host_SystemMetrics.encode( + message.system, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.nodeGroup !== "") { + writer.uint32(90).string(message.nodeGroup); + } + writer.uint32(98).fork(); + for (const v of message.roles) { + writer.int32(v); + } + writer.ldelim(); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseHost } as Host; + message.roles = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + case 3: + message.zoneId = reader.string(); + break; + case 4: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 5: + message.type = reader.int32() as any; + break; + case 6: + message.health = reader.int32() as any; + break; + case 8: + message.subnetId = reader.string(); + break; + case 9: + message.assignPublicIp = reader.bool(); + break; + case 10: + message.system = Host_SystemMetrics.decode(reader, reader.uint32()); + break; + case 11: + message.nodeGroup = reader.string(); + break; + case 12: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.roles.push(reader.int32() as any); + } + } else { + message.roles.push(reader.int32() as any); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host { + const message = { ...baseHost } as Host; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.type = + object.type !== undefined && object.type !== null + ? host_TypeFromJSON(object.type) + : 0; + message.health = + object.health !== undefined && object.health !== null + ? host_HealthFromJSON(object.health) + : 0; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? 
String(object.subnetId) + : ""; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + message.system = + object.system !== undefined && object.system !== null + ? Host_SystemMetrics.fromJSON(object.system) + : undefined; + message.nodeGroup = + object.nodeGroup !== undefined && object.nodeGroup !== null + ? String(object.nodeGroup) + : ""; + message.roles = (object.roles ?? []).map((e: any) => + openSearch_GroupRoleFromJSON(e) + ); + return message; + }, + + toJSON(message: Host): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + message.type !== undefined && (obj.type = host_TypeToJSON(message.type)); + message.health !== undefined && + (obj.health = host_HealthToJSON(message.health)); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + message.system !== undefined && + (obj.system = message.system + ? Host_SystemMetrics.toJSON(message.system) + : undefined); + message.nodeGroup !== undefined && (obj.nodeGroup = message.nodeGroup); + if (message.roles) { + obj.roles = message.roles.map((e) => openSearch_GroupRoleToJSON(e)); + } else { + obj.roles = []; + } + return obj; + }, + + fromPartial, I>>(object: I): Host { + const message = { ...baseHost } as Host; + message.name = object.name ?? ""; + message.clusterId = object.clusterId ?? ""; + message.zoneId = object.zoneId ?? ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.type = object.type ?? 
0; + message.health = object.health ?? 0; + message.subnetId = object.subnetId ?? ""; + message.assignPublicIp = object.assignPublicIp ?? false; + message.system = + object.system !== undefined && object.system !== null + ? Host_SystemMetrics.fromPartial(object.system) + : undefined; + message.nodeGroup = object.nodeGroup ?? ""; + message.roles = object.roles?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Host.$type, Host); + +const baseHost_CPUMetric: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.CPUMetric", + timestamp: 0, + used: 0, +}; + +export const Host_CPUMetric = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.CPUMetric" as const, + + encode( + message: Host_CPUMetric, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== 0) { + writer.uint32(8).int64(message.timestamp); + } + if (message.used !== 0) { + writer.uint32(17).double(message.used); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host_CPUMetric { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseHost_CPUMetric } as Host_CPUMetric; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = longToNumber(reader.int64() as Long); + break; + case 2: + message.used = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host_CPUMetric { + const message = { ...baseHost_CPUMetric } as Host_CPUMetric; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? Number(object.timestamp) + : 0; + message.used = + object.used !== undefined && object.used !== null + ? 
Number(object.used) + : 0; + return message; + }, + + toJSON(message: Host_CPUMetric): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = Math.round(message.timestamp)); + message.used !== undefined && (obj.used = message.used); + return obj; + }, + + fromPartial, I>>( + object: I + ): Host_CPUMetric { + const message = { ...baseHost_CPUMetric } as Host_CPUMetric; + message.timestamp = object.timestamp ?? 0; + message.used = object.used ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Host_CPUMetric.$type, Host_CPUMetric); + +const baseHost_MemoryMetric: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.MemoryMetric", + timestamp: 0, + used: 0, + total: 0, +}; + +export const Host_MemoryMetric = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.MemoryMetric" as const, + + encode( + message: Host_MemoryMetric, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== 0) { + writer.uint32(8).int64(message.timestamp); + } + if (message.used !== 0) { + writer.uint32(16).int64(message.used); + } + if (message.total !== 0) { + writer.uint32(24).int64(message.total); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host_MemoryMetric { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseHost_MemoryMetric } as Host_MemoryMetric; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = longToNumber(reader.int64() as Long); + break; + case 2: + message.used = longToNumber(reader.int64() as Long); + break; + case 3: + message.total = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host_MemoryMetric { + const message = { ...baseHost_MemoryMetric } as Host_MemoryMetric; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? Number(object.timestamp) + : 0; + message.used = + object.used !== undefined && object.used !== null + ? Number(object.used) + : 0; + message.total = + object.total !== undefined && object.total !== null + ? Number(object.total) + : 0; + return message; + }, + + toJSON(message: Host_MemoryMetric): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = Math.round(message.timestamp)); + message.used !== undefined && (obj.used = Math.round(message.used)); + message.total !== undefined && (obj.total = Math.round(message.total)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Host_MemoryMetric { + const message = { ...baseHost_MemoryMetric } as Host_MemoryMetric; + message.timestamp = object.timestamp ?? 0; + message.used = object.used ?? 0; + message.total = object.total ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Host_MemoryMetric.$type, Host_MemoryMetric); + +const baseHost_DiskMetric: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.DiskMetric", + timestamp: 0, + used: 0, + total: 0, +}; + +export const Host_DiskMetric = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.DiskMetric" as const, + + encode( + message: Host_DiskMetric, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== 0) { + writer.uint32(8).int64(message.timestamp); + } + if (message.used !== 0) { + writer.uint32(16).int64(message.used); + } + if (message.total !== 0) { + writer.uint32(24).int64(message.total); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host_DiskMetric { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseHost_DiskMetric } as Host_DiskMetric; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = longToNumber(reader.int64() as Long); + break; + case 2: + message.used = longToNumber(reader.int64() as Long); + break; + case 3: + message.total = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host_DiskMetric { + const message = { ...baseHost_DiskMetric } as Host_DiskMetric; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? Number(object.timestamp) + : 0; + message.used = + object.used !== undefined && object.used !== null + ? Number(object.used) + : 0; + message.total = + object.total !== undefined && object.total !== null + ? 
Number(object.total) + : 0; + return message; + }, + + toJSON(message: Host_DiskMetric): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = Math.round(message.timestamp)); + message.used !== undefined && (obj.used = Math.round(message.used)); + message.total !== undefined && (obj.total = Math.round(message.total)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Host_DiskMetric { + const message = { ...baseHost_DiskMetric } as Host_DiskMetric; + message.timestamp = object.timestamp ?? 0; + message.used = object.used ?? 0; + message.total = object.total ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Host_DiskMetric.$type, Host_DiskMetric); + +const baseHost_SystemMetrics: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.SystemMetrics", +}; + +export const Host_SystemMetrics = { + $type: "yandex.cloud.mdb.opensearch.v1.Host.SystemMetrics" as const, + + encode( + message: Host_SystemMetrics, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cpu !== undefined) { + Host_CPUMetric.encode(message.cpu, writer.uint32(10).fork()).ldelim(); + } + if (message.memory !== undefined) { + Host_MemoryMetric.encode( + message.memory, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.disk !== undefined) { + Host_DiskMetric.encode(message.disk, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host_SystemMetrics { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseHost_SystemMetrics } as Host_SystemMetrics; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cpu = Host_CPUMetric.decode(reader, reader.uint32()); + break; + case 2: + message.memory = Host_MemoryMetric.decode(reader, reader.uint32()); + break; + case 3: + message.disk = Host_DiskMetric.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host_SystemMetrics { + const message = { ...baseHost_SystemMetrics } as Host_SystemMetrics; + message.cpu = + object.cpu !== undefined && object.cpu !== null + ? Host_CPUMetric.fromJSON(object.cpu) + : undefined; + message.memory = + object.memory !== undefined && object.memory !== null + ? Host_MemoryMetric.fromJSON(object.memory) + : undefined; + message.disk = + object.disk !== undefined && object.disk !== null + ? Host_DiskMetric.fromJSON(object.disk) + : undefined; + return message; + }, + + toJSON(message: Host_SystemMetrics): unknown { + const obj: any = {}; + message.cpu !== undefined && + (obj.cpu = message.cpu ? Host_CPUMetric.toJSON(message.cpu) : undefined); + message.memory !== undefined && + (obj.memory = message.memory + ? Host_MemoryMetric.toJSON(message.memory) + : undefined); + message.disk !== undefined && + (obj.disk = message.disk + ? Host_DiskMetric.toJSON(message.disk) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Host_SystemMetrics { + const message = { ...baseHost_SystemMetrics } as Host_SystemMetrics; + message.cpu = + object.cpu !== undefined && object.cpu !== null + ? Host_CPUMetric.fromPartial(object.cpu) + : undefined; + message.memory = + object.memory !== undefined && object.memory !== null + ? Host_MemoryMetric.fromPartial(object.memory) + : undefined; + message.disk = + object.disk !== undefined && object.disk !== null + ? 
Host_DiskMetric.fromPartial(object.disk) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Host_SystemMetrics.$type, Host_SystemMetrics); + +const baseAccess: object = { + $type: "yandex.cloud.mdb.opensearch.v1.Access", + dataTransfer: false, + serverless: false, +}; + +export const Access = { + $type: "yandex.cloud.mdb.opensearch.v1.Access" as const, + + encode( + message: Access, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dataTransfer === true) { + writer.uint32(8).bool(message.dataTransfer); + } + if (message.serverless === true) { + writer.uint32(16).bool(message.serverless); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Access { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAccess } as Access; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataTransfer = reader.bool(); + break; + case 2: + message.serverless = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; + message.serverless = + object.serverless !== undefined && object.serverless !== null + ? Boolean(object.serverless) + : false; + return message; + }, + + toJSON(message: Access): unknown { + const obj: any = {}; + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); + message.serverless !== undefined && (obj.serverless = message.serverless); + return obj; + }, + + fromPartial, I>>(object: I): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = object.dataTransfer ?? 
false; + message.serverless = object.serverless ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Access.$type, Access); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/cluster_service.ts 
b/src/generated/yandex/cloud/mdb/opensearch/v1/cluster_service.ts new file mode 100644 index 00000000..372ce5af --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/cluster_service.ts @@ -0,0 +1,7809 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + handleServerStreamingCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ClientReadableStream, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + Cluster_Environment, + Access, + Resources, + Cluster, + Host, + OpenSearch_GroupRole, + cluster_EnvironmentFromJSON, + cluster_EnvironmentToJSON, + openSearch_GroupRoleFromJSON, + openSearch_GroupRoleToJSON, +} from "../../../../../yandex/cloud/mdb/opensearch/v1/cluster"; +import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/opensearch/v1/maintenance"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { AuthSettings } from "../../../../../yandex/cloud/mdb/opensearch/v1/auth"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; +import { OpenSearchConfig2 } from "../../../../../yandex/cloud/mdb/opensearch/v1/config/opensearch"; +import { Backup } from "../../../../../yandex/cloud/mdb/opensearch/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +export interface GetClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.GetClusterRequest"; + /** + * ID of the OpenSearch cluster to return. + * + * To get the cluster ID, use a [ClusterService.List] request. 
+ */ + clusterId: string; +} + +export interface ListClustersRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersRequest"; + /** + * ID of the folder to list OpenSearch clusters in. + * + * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns + * a [ListClustersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] + * returned by the previous list request. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * + * 1. The field name. Currently you can only use filtering with the [Cluster.name] field. + * + * 2. An `=` operator. + * + * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-zA-Z0-9_-]+`. + */ + filter: string; +} + +export interface ListClustersResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersResponse"; + /** List of OpenSearch clusters. */ + clusters: Cluster[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + * for the [ListClustersRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest"; + /** ID of the folder to create the OpenSearch cluster in. 
*/ + folderId: string; + /** Name of the OpenSearch cluster. The name must be unique within the folder. */ + name: string; + /** Description of the OpenSearch cluster. */ + description: string; + /** + * Custom labels for the OpenSearch cluster as `key:value` pairs. + * For example, `"project": "mvp"` or `"source": "dictionary"`. + */ + labels: { [key: string]: string }; + /** Deployment environment of the OpenSearch cluster. */ + environment: Cluster_Environment; + /** OpenSearch cluster configuration. */ + configSpec?: ConfigCreateSpec; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups. */ + securityGroupIds: string[]; + /** ID of the service account used to access Object Storage. */ + serviceAccountId: string; + /** Determines whether the cluster is protected from being deleted. */ + deletionProtection: boolean; + /** Cluster maintenance window. Should be defined by either one of the two options. */ + maintenanceWindow?: MaintenanceWindow; +} + +export interface CreateClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterMetadata"; + /** ID of the OpenSearch cluster that is being created. */ + clusterId: string; +} + +export interface UpdateClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest"; + /** + * ID of the OpenSearch cluster resource to update. + * To get the OpenSearch cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Field mask that specifies which fields of the OpenSearch cluster resource should be updated. */ + updateMask?: FieldMask; + /** New description of the OpenSearch cluster. */ + description: string; + /** + * Custom labels for the OpenSearch cluster as `key:value` pairs. + * For example, `"project": "mvp"` or `"source": "dictionary"`. 
+ * + * The new set of labels completely replaces the old one. To add a label, request the current + * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + */ + labels: { [key: string]: string }; + /** New cluster configuration */ + configSpec?: ConfigUpdateSpec; + /** New name for the cluster. The name must be unique within the folder. */ + name: string; + /** User security groups */ + securityGroupIds: string[]; + /** ID of the service account used to access Object Storage. */ + serviceAccountId: string; + /** Determines whether the cluster is protected from being deleted. */ + deletionProtection: boolean; + /** Cluster maintenance window. Should be defined by either one of the two options. */ + maintenanceWindow?: MaintenanceWindow; +} + +export interface UpdateClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterMetadata"; + /** ID of the OpenSearch cluster resource that is being updated. */ + clusterId: string; +} + +export interface DeleteClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterRequest"; + /** + * ID of the OpenSearch cluster to delete. + * To get the OpenSearch cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; +} + +export interface DeleteClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterMetadata"; + /** ID of the OpenSearch cluster that is being deleted. */ + clusterId: string; +} + +export interface ListClusterLogsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsRequest"; + /** + * ID of the OpenSearch cluster to request logs for. + * + * To get the OpenSearch cluster ID use a [ClusterService.List] request. + */ + clusterId: string; + /** + * Columns from log table to request. 
+ * If no columns are specified, entire log records are returned. + */ + columnFilter: string[]; + /** Start timestamp for the logs request. */ + fromTime?: Date; + /** End timestamp for the logs request. */ + toTime?: Date; + /** + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListClusterLogsResponse.next_page_token] returned by the previous list request. + */ + pageToken: string; + /** The service always returns a [ListClusterLogsResponse.next_page_token], even if the current page is empty. */ + alwaysNextPageToken: boolean; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * + * 1. A field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname] field. + * + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * + * 3. A value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. + * + * Examples of a filter: + * * `message.hostname='node1.db.cloud.yandex.net'`; + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"`. + */ + filter: string; + /** Type of the service to request logs about. */ + serviceType: ListClusterLogsRequest_ServiceType; +} + +export enum ListClusterLogsRequest_ServiceType { + /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. */ + SERVICE_TYPE_UNSPECIFIED = 0, + /** OPENSEARCH - OpenSearch logs. */ + OPENSEARCH = 1, + /** DASHBOARDS - Dashboards logs. 
*/ + DASHBOARDS = 2, + UNRECOGNIZED = -1, +} + +export function listClusterLogsRequest_ServiceTypeFromJSON( + object: any +): ListClusterLogsRequest_ServiceType { + switch (object) { + case 0: + case "SERVICE_TYPE_UNSPECIFIED": + return ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED; + case 1: + case "OPENSEARCH": + return ListClusterLogsRequest_ServiceType.OPENSEARCH; + case 2: + case "DASHBOARDS": + return ListClusterLogsRequest_ServiceType.DASHBOARDS; + case -1: + case "UNRECOGNIZED": + default: + return ListClusterLogsRequest_ServiceType.UNRECOGNIZED; + } +} + +export function listClusterLogsRequest_ServiceTypeToJSON( + object: ListClusterLogsRequest_ServiceType +): string { + switch (object) { + case ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED: + return "SERVICE_TYPE_UNSPECIFIED"; + case ListClusterLogsRequest_ServiceType.OPENSEARCH: + return "OPENSEARCH"; + case ListClusterLogsRequest_ServiceType.DASHBOARDS: + return "DASHBOARDS"; + default: + return "UNKNOWN"; + } +} + +export interface LogRecord { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord"; + /** Time when the log was recorded. */ + timestamp?: Date; + /** Contents of the log record. */ + message: { [key: string]: string }; +} + +export interface LogRecord_MessageEntry { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord.MessageEntry"; + key: string; + value: string; +} + +export interface ListClusterLogsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsResponse"; + /** Requested log records. */ + logs: LogRecord[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. 
+ * + * This value is interchangeable with the [StreamLogRecord.next_record_token] from [StreamLogs] method. + */ + nextPageToken: string; +} + +export interface StreamLogRecord { + $type: "yandex.cloud.mdb.opensearch.v1.StreamLogRecord"; + /** One of the requested log records. */ + record?: LogRecord; + /** + * This token allows you to continue streaming logs starting from the exact same record. + * + * To do that, specify value of [next_record_token] as the value for [StreamLogs.record_token] parameter in the next [StreamLogs] request. + * + * This value is interchangeable with [ListLogs.next_page_token] from [ListLogs] method. + */ + nextRecordToken: string; +} + +export interface StreamClusterLogsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.StreamClusterLogsRequest"; + /** ID of the OpenSearch cluster. */ + clusterId: string; + /** + * Columns from log table to get in the response. + * If no columns are specified, entire log records are returned. + */ + columnFilter: string[]; + /** Start timestamp for the logs request. */ + fromTime?: Date; + /** + * End timestamp for the logs request. + * + * If this field is not set, all existing logs are sent as well as the new ones as they appear. + * + * In essence it has `tail -f` semantics. + */ + toTime?: Date; + /** + * Record token. Set `record_token` to the `next_record_token` returned by the previous [StreamLogs] + * request to start streaming from the next log record. + */ + recordToken: string; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * + * 1. A field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname] field. + * + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * + * 3. A value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. 
+ * + * Examples of a filter: + * * `message.hostname='node1.db.cloud.yandex.net'`; + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"`. + */ + filter: string; + /** Type of the service to request logs about. */ + serviceType: StreamClusterLogsRequest_ServiceType; +} + +export enum StreamClusterLogsRequest_ServiceType { + /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. */ + SERVICE_TYPE_UNSPECIFIED = 0, + /** OPENSEARCH - OpenSearch logs. */ + OPENSEARCH = 1, + /** DASHBOARDS - Dashboards logs. */ + DASHBOARDS = 2, + UNRECOGNIZED = -1, +} + +export function streamClusterLogsRequest_ServiceTypeFromJSON( + object: any +): StreamClusterLogsRequest_ServiceType { + switch (object) { + case 0: + case "SERVICE_TYPE_UNSPECIFIED": + return StreamClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED; + case 1: + case "OPENSEARCH": + return StreamClusterLogsRequest_ServiceType.OPENSEARCH; + case 2: + case "DASHBOARDS": + return StreamClusterLogsRequest_ServiceType.DASHBOARDS; + case -1: + case "UNRECOGNIZED": + default: + return StreamClusterLogsRequest_ServiceType.UNRECOGNIZED; + } +} + +export function streamClusterLogsRequest_ServiceTypeToJSON( + object: StreamClusterLogsRequest_ServiceType +): string { + switch (object) { + case StreamClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED: + return "SERVICE_TYPE_UNSPECIFIED"; + case StreamClusterLogsRequest_ServiceType.OPENSEARCH: + return "OPENSEARCH"; + case StreamClusterLogsRequest_ServiceType.DASHBOARDS: + return "DASHBOARDS"; + default: + return "UNKNOWN"; + } +} + +export interface ListClusterOperationsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsRequest"; + /** ID of the OpenSearch cluster resource to list operations for. */ + clusterId: string; + /** + * The maximum number of results per page to return. 
+ * + * If the number of available results is larger than [page_size], the service returns + * a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ + pageToken: string; +} + +export interface ListClusterOperationsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsResponse"; + /** List of Operation resources for the specified OpenSearch cluster. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListClusterHostsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsRequest"; + /** + * ID of the OpenSearch cluster. + * To get the OpenSearch cluster ID use a [ClusterService.List] request. + */ + clusterId: string; + /** + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns + * a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + * returned by the previous list request. 
+ */ + pageToken: string; +} + +export interface ListClusterHostsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsResponse"; + /** Requested list of hosts for the cluster. */ + hosts: Host[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] + * as the value for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface MoveClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterRequest"; + /** ID of the OpenSearch cluster to move. */ + clusterId: string; + /** ID of the destination folder. */ + destinationFolderId: string; +} + +export interface MoveClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterMetadata"; + /** ID of the OpenSearch cluster being moved. */ + clusterId: string; + /** ID of the source folder. */ + sourceFolderId: string; + /** ID of the destnation folder. */ + destinationFolderId: string; +} + +export interface StartClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterRequest"; + /** + * ID of the OpenSearch cluster to start. + * To get the cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; +} + +export interface StartClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterMetadata"; + /** ID of the OpenSearch cluster being started. */ + clusterId: string; +} + +export interface StopClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterRequest"; + /** + * ID of the OpenSearch cluster to stop. + * To get the cluster ID, use a [ClusterService.List] request. 
+ */ + clusterId: string; +} + +export interface StopClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterMetadata"; + /** ID of the OpenSearch cluster being stopped. */ + clusterId: string; +} + +export interface ConfigCreateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigCreateSpec"; + /** OpenSearch version. */ + version: string; + /** OpenSearch admin password. */ + adminPassword: string; + /** OpenSearch configuration. */ + opensearchSpec?: OpenSearchCreateSpec; + /** Dashboards configuration. */ + dashboardsSpec?: DashboardsCreateSpec; + /** Access policy for external services. */ + access?: Access; +} + +/** OpenSearch create-time configuration. */ +export interface OpenSearchCreateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec"; + /** Names of the cluster plugins. */ + plugins: string[]; + /** OpenSearch type host groups of the cluster. */ + nodeGroups: OpenSearchCreateSpec_NodeGroup[]; + opensearchConfig2?: OpenSearchConfig2 | undefined; +} + +/** Configuration of the host group. */ +export interface OpenSearchCreateSpec_NodeGroup { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec.NodeGroup"; + /** Name of the group. */ + name: string; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. */ + hostsCount: number; + /** IDs of the availability zones the hosts belong to. */ + zoneIds: string[]; + /** IDs of the subnets that the hosts belong to. */ + subnetIds: string[]; + /** Determines whether a public IP is assigned to the hosts in the group. */ + assignPublicIp: boolean; + /** Roles of the hosts in the group. */ + roles: OpenSearch_GroupRole[]; +} + +/** Dashboards create-time configuration. */ +export interface DashboardsCreateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec"; + /** Dashboards type host groups of the cluster. 
*/ + nodeGroups: DashboardsCreateSpec_NodeGroup[]; +} + +export interface DashboardsCreateSpec_NodeGroup { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec.NodeGroup"; + /** Name of the group. */ + name: string; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. */ + hostsCount: number; + /** IDs of the availability zones the hosts belong to. */ + zoneIds: string[]; + /** IDs of the subnets that the hosts belong to. */ + subnetIds: string[]; + /** Determines whether a public IP is assigned to the hosts in the group. */ + assignPublicIp: boolean; +} + +export interface ConfigUpdateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigUpdateSpec"; + /** OpenSearch version. */ + version: string; + /** OpenSearch admin password. */ + adminPassword: string; + /** OpenSearch configuration. */ + opensearchSpec?: OpenSearchClusterUpdateSpec; + /** Dashboards configuration. */ + dashboardsSpec?: DashboardsClusterUpdateSpec; + /** Access policy for external services. */ + access?: Access; +} + +export interface OpenSearchClusterUpdateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchClusterUpdateSpec"; + /** Names of the cluster plugins. */ + plugins: string[]; + opensearchConfig2?: OpenSearchConfig2 | undefined; +} + +/** Dashboards configuration. */ +export interface DashboardsClusterUpdateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsClusterUpdateSpec"; +} + +export interface BackupClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterRequest"; + /** + * ID of the OpenSearch cluster to back up. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; +} + +export interface BackupClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterMetadata"; + /** ID of the OpenSearch cluster being backed up. 
*/ + clusterId: string; +} + +export interface RestoreClusterRequest { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest"; + /** + * ID of the backup to create a new cluster from. + * + * To get the backup ID, use a [ClusterService.ListBackups] request. + */ + backupId: string; + /** Name of the new OpenSearch cluster to be created from the backup. The name must be unique within the folder. */ + name: string; + /** Description of the new OpenSearch cluster to be created from the backup. */ + description: string; + /** + * Custom labels for the new OpenSearch cluster to be created from the backup as `key:value` pairs. Maximum 64 per resource. + * For example, "project": "mvp" or "source": "dictionary". + */ + labels: { [key: string]: string }; + /** Deployment environment of the new OpenSearch cluster to be created from the backup. */ + environment: Cluster_Environment; + /** Configuration for the new OpenSearch cluster to be created from the backup. */ + configSpec?: ConfigCreateSpec; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups. */ + securityGroupIds: string[]; + /** ID of the service account used to access Object Storage. */ + serviceAccountId: string; + /** Determines whether the cluster is protected from being deleted. */ + deletionProtection: boolean; + /** + * ID of the folder to create the OpenSearch cluster in. + * + * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** Cluster maintenance window. Should be defined by either one of the two options. 
*/ + maintenanceWindow?: MaintenanceWindow; +} + +export interface RestoreClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface RestoreClusterMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterMetadata"; + /** ID of the new OpenSearch cluster being created from a backup. */ + clusterId: string; + /** ID of the backup being used for creating a cluster. */ + backupId: string; +} + +export interface RescheduleMaintenanceRequest { + $type: "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceRequest"; + /** + * ID of the OpenSearch cluster to reschedule the maintenance operation for. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** The type of the reschedule request. */ + rescheduleType: RescheduleMaintenanceRequest_RescheduleType; + /** + * The time until which this maintenance operation should be delayed. + * The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. + * The value can also point to a moment in the past if [reschedule_type.IMMEDIATE] reschedule type is selected. + */ + delayedUntil?: Date; +} + +export enum RescheduleMaintenanceRequest_RescheduleType { + /** RESCHEDULE_TYPE_UNSPECIFIED - Time of the maintenance is not specified.. */ + RESCHEDULE_TYPE_UNSPECIFIED = 0, + /** IMMEDIATE - Start the maintenance operation immediately. */ + IMMEDIATE = 1, + /** NEXT_AVAILABLE_WINDOW - Start the maintenance operation within the next available maintenance window. */ + NEXT_AVAILABLE_WINDOW = 2, + /** SPECIFIC_TIME - Start the maintenance operation at the specific time. 
*/ + SPECIFIC_TIME = 3, + UNRECOGNIZED = -1, +} + +export function rescheduleMaintenanceRequest_RescheduleTypeFromJSON( + object: any +): RescheduleMaintenanceRequest_RescheduleType { + switch (object) { + case 0: + case "RESCHEDULE_TYPE_UNSPECIFIED": + return RescheduleMaintenanceRequest_RescheduleType.RESCHEDULE_TYPE_UNSPECIFIED; + case 1: + case "IMMEDIATE": + return RescheduleMaintenanceRequest_RescheduleType.IMMEDIATE; + case 2: + case "NEXT_AVAILABLE_WINDOW": + return RescheduleMaintenanceRequest_RescheduleType.NEXT_AVAILABLE_WINDOW; + case 3: + case "SPECIFIC_TIME": + return RescheduleMaintenanceRequest_RescheduleType.SPECIFIC_TIME; + case -1: + case "UNRECOGNIZED": + default: + return RescheduleMaintenanceRequest_RescheduleType.UNRECOGNIZED; + } +} + +export function rescheduleMaintenanceRequest_RescheduleTypeToJSON( + object: RescheduleMaintenanceRequest_RescheduleType +): string { + switch (object) { + case RescheduleMaintenanceRequest_RescheduleType.RESCHEDULE_TYPE_UNSPECIFIED: + return "RESCHEDULE_TYPE_UNSPECIFIED"; + case RescheduleMaintenanceRequest_RescheduleType.IMMEDIATE: + return "IMMEDIATE"; + case RescheduleMaintenanceRequest_RescheduleType.NEXT_AVAILABLE_WINDOW: + return "NEXT_AVAILABLE_WINDOW"; + case RescheduleMaintenanceRequest_RescheduleType.SPECIFIC_TIME: + return "SPECIFIC_TIME"; + default: + return "UNKNOWN"; + } +} + +export interface RescheduleMaintenanceMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceMetadata"; + /** ID of the OpenSearch cluster where the reschedule is applied. */ + clusterId: string; + /** The time until which this maintenance operation is to be delayed. */ + delayedUntil?: Date; +} + +export interface ListClusterBackupsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsRequest"; + /** + * ID of the OpenSearch cluster. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** + * The maximum number of results per page to return. 
If the number of available + * results is larger than `page_size`, the service returns a [ListClusterBackupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the [ListClusterBackupsResponse.next_page_token] + * returned by the previous list request. + */ + pageToken: string; +} + +export interface ListClusterBackupsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsResponse"; + /** List of the OpenSearch cluster backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + * for the [ListClustersRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface DeleteOpenSearchNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteOpenSearchNodeGroupRequest"; + /** + * ID of the OpenSearch cluster to delete the OpenSearch type host group in. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Name of the OpenSearch type host group to delete. */ + name: string; +} + +export interface UpdateOpenSearchNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateOpenSearchNodeGroupRequest"; + /** + * ID of the OpenSearch cluster to update the OpenSearch type host group in. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Name of the OpenSearch type host group to be updated. */ + name: string; + /** Field mask that specifies which fields of the host group configuration should be updated. */ + updateMask?: FieldMask; + /** New configuration for the host group. 
*/ + nodeGroupSpec?: OpenSearchNodeGroupUpdateSpec; +} + +export interface OpenSearchNodeGroupUpdateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchNodeGroupUpdateSpec"; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. */ + hostsCount: number; + /** Roles of the host group. */ + roles: OpenSearch_GroupRole[]; +} + +export interface AddOpenSearchNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.AddOpenSearchNodeGroupRequest"; + /** + * ID of the OpenSearch cluster to create the OpenSearch type host group in. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Configuration of the new host group. */ + nodeGroupSpec?: OpenSearchCreateSpec_NodeGroup; +} + +export interface DeleteDashboardsNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteDashboardsNodeGroupRequest"; + /** + * ID of the OpenSearch cluster to delete the Dashboards type host group in. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Name of the Dashboards type host group to delete. */ + name: string; +} + +export interface UpdateDashboardsNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateDashboardsNodeGroupRequest"; + /** ID of the OpenSearch cluster to update the Dashboards type host group in. */ + clusterId: string; + /** Name of the Dashboards type host group to be updated. */ + name: string; + /** Field mask that specifies which fields of the host group configuration should be updated. */ + updateMask?: FieldMask; + /** New configuration for the host group. */ + nodeGroupSpec?: DashboardsNodeGroupUpdateSpec; +} + +export interface DashboardsNodeGroupUpdateSpec { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsNodeGroupUpdateSpec"; + /** Resources allocated to the hosts. */ + resources?: Resources; + /** Number of hosts in the group. 
*/ + hostsCount: number; +} + +export interface AddDashboardsNodeGroupRequest { + $type: "yandex.cloud.mdb.opensearch.v1.AddDashboardsNodeGroupRequest"; + /** + * ID of the OpenSearch cluster to create the Dashboards type host group in. + * + * To get the ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** Configuration of the new host group. */ + nodeGroupSpec?: DashboardsCreateSpec_NodeGroup; +} + +export interface AddNodeGroupMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.AddNodeGroupMetadata"; + /** ID of the OpenSearch cluster where the host group is being created. */ + clusterId: string; + /** Name of the host group being created. */ + name: string; +} + +export interface UpdateNodeGroupMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateNodeGroupMetadata"; + /** ID of the OpenSearch cluster where the host group is being updated. */ + clusterId: string; + /** Name of the host group being updated. */ + name: string; +} + +export interface DeleteNodeGroupMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteNodeGroupMetadata"; + /** ID of the OpenSearch cluster where the host group is being deleted. */ + clusterId: string; + /** Name of the host group being deleted. */ + name: string; +} + +export interface GetAuthSettingsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.GetAuthSettingsRequest"; + /** Required. ID of the OpenSearch cluster. */ + clusterId: string; +} + +export interface UpdateAuthSettingsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsRequest"; + /** Required. ID of the OpenSearch cluster. */ + clusterId: string; + /** Required. Auth settings. */ + settings?: AuthSettings; +} + +export interface UpdateAuthSettingsMetadata { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsMetadata"; + /** ID of the OpenSearch cluster. 
*/ + clusterId: string; +} + +const baseGetClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.GetClusterRequest", + clusterId: "", +}; + +export const GetClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.GetClusterRequest" as const, + + encode( + message: GetClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetClusterRequest } as GetClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetClusterRequest { + const message = { ...baseGetClusterRequest } as GetClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: GetClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetClusterRequest { + const message = { ...baseGetClusterRequest } as GetClusterRequest; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetClusterRequest.$type, GetClusterRequest); + +const baseListClustersRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListClustersRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersRequest" as const, + + encode( + message: ListClustersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListClustersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListClustersRequest } as ListClustersRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClustersRequest { + const message = { ...baseListClustersRequest } as ListClustersRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListClustersRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClustersRequest { + const message = { ...baseListClustersRequest } as ListClustersRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListClustersRequest.$type, ListClustersRequest); + +const baseListClustersResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersResponse", + nextPageToken: "", +}; + +export const ListClustersResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClustersResponse" as const, + + encode( + message: ListClustersResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.clusters) { + Cluster.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClustersResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListClustersResponse } as ListClustersResponse; + message.clusters = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusters.push(Cluster.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClustersResponse { + const message = { ...baseListClustersResponse } as ListClustersResponse; + message.clusters = (object.clusters ?? []).map((e: any) => + Cluster.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClustersResponse): unknown { + const obj: any = {}; + if (message.clusters) { + obj.clusters = message.clusters.map((e) => + e ? Cluster.toJSON(e) : undefined + ); + } else { + obj.clusters = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClustersResponse { + const message = { ...baseListClustersResponse } as ListClustersResponse; + message.clusters = + object.clusters?.map((e) => Cluster.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListClustersResponse.$type, ListClustersResponse); + +const baseCreateClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest", + folderId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, +}; + +export const CreateClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest" as const, + + encode( + message: CreateClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(40).int32(message.environment); + } + if (message.configSpec !== undefined) { + ConfigCreateSpec.encode( + message.configSpec, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(58).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(66).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(74).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + writer.uint32(80).bool(message.deletionProtection); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(90).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateClusterRequest { + const reader = input 
instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateClusterRequest } as CreateClusterRequest; + message.labels = {}; + message.securityGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.environment = reader.int32() as any; + break; + case 6: + message.configSpec = ConfigCreateSpec.decode(reader, reader.uint32()); + break; + case 7: + message.networkId = reader.string(); + break; + case 8: + message.securityGroupIds.push(reader.string()); + break; + case 9: + message.serviceAccountId = reader.string(); + break; + case 10: + message.deletionProtection = reader.bool(); + break; + case 11: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateClusterRequest { + const message = { ...baseCreateClusterRequest } as CreateClusterRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigCreateSpec.fromJSON(object.configSpec) + : undefined; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: CreateClusterRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? 
ConfigCreateSpec.toJSON(message.configSpec) + : undefined); + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateClusterRequest { + const message = { ...baseCreateClusterRequest } as CreateClusterRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigCreateSpec.fromPartial(object.configSpec) + : undefined; + message.networkId = object.networkId ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? 
MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateClusterRequest.$type, CreateClusterRequest); + +const baseCreateClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.opensearch.v1.CreateClusterRequest.LabelsEntry" as const, + + encode( + message: CreateClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateClusterRequest_LabelsEntry, + } as CreateClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateClusterRequest_LabelsEntry { + const message = { + ...baseCreateClusterRequest_LabelsEntry, + } as CreateClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateClusterRequest_LabelsEntry { + const message = { + ...baseCreateClusterRequest_LabelsEntry, + } as CreateClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateClusterRequest_LabelsEntry.$type, + CreateClusterRequest_LabelsEntry +); + +const baseCreateClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterMetadata", + clusterId: "", +}; + +export const CreateClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.CreateClusterMetadata" as const, + + encode( + message: CreateClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateClusterMetadata } as CreateClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateClusterMetadata { + const message = { ...baseCreateClusterMetadata } as CreateClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: CreateClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateClusterMetadata { + const message = { ...baseCreateClusterMetadata } as CreateClusterMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateClusterMetadata.$type, CreateClusterMetadata); + +const baseUpdateClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest", + clusterId: "", + description: "", + name: "", + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, +}; + +export const UpdateClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest" as const, + + encode( + message: UpdateClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.configSpec !== undefined) { + ConfigUpdateSpec.encode( + message.configSpec, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(50).string(message.name); + } + for (const v of message.securityGroupIds) { + writer.uint32(58).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(66).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + 
writer.uint32(72).bool(message.deletionProtection); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateClusterRequest } as UpdateClusterRequest; + message.labels = {}; + message.securityGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = UpdateClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.configSpec = ConfigUpdateSpec.decode(reader, reader.uint32()); + break; + case 6: + message.name = reader.string(); + break; + case 7: + message.securityGroupIds.push(reader.string()); + break; + case 8: + message.serviceAccountId = reader.string(); + break; + case 9: + message.deletionProtection = reader.bool(); + break; + case 10: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterRequest { + const message = { ...baseUpdateClusterRequest } as UpdateClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? 
FieldMask.fromJSON(object.updateMask) + : undefined; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigUpdateSpec.fromJSON(object.configSpec) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: UpdateClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? 
ConfigUpdateSpec.toJSON(message.configSpec) + : undefined); + message.name !== undefined && (obj.name = message.name); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterRequest { + const message = { ...baseUpdateClusterRequest } as UpdateClusterRequest; + message.clusterId = object.clusterId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigUpdateSpec.fromPartial(object.configSpec) + : undefined; + message.name = object.name ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? 
MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateClusterRequest.$type, UpdateClusterRequest); + +const baseUpdateClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.opensearch.v1.UpdateClusterRequest.LabelsEntry" as const, + + encode( + message: UpdateClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterRequest_LabelsEntry, + } as UpdateClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterRequest_LabelsEntry { + const message = { + ...baseUpdateClusterRequest_LabelsEntry, + } as UpdateClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateClusterRequest_LabelsEntry { + const message = { + ...baseUpdateClusterRequest_LabelsEntry, + } as UpdateClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterRequest_LabelsEntry.$type, + UpdateClusterRequest_LabelsEntry +); + +const baseUpdateClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterMetadata", + clusterId: "", +}; + +export const UpdateClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateClusterMetadata" as const, + + encode( + message: UpdateClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateClusterMetadata } as UpdateClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterMetadata { + const message = { ...baseUpdateClusterMetadata } as UpdateClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: UpdateClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterMetadata { + const message = { ...baseUpdateClusterMetadata } as UpdateClusterMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateClusterMetadata.$type, UpdateClusterMetadata); + +const baseDeleteClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterRequest", + clusterId: "", +}; + +export const DeleteClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterRequest" as const, + + encode( + message: DeleteClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteClusterRequest } as DeleteClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteClusterRequest { + const message = { ...baseDeleteClusterRequest } as DeleteClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: DeleteClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteClusterRequest { + const message = { ...baseDeleteClusterRequest } as DeleteClusterRequest; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteClusterRequest.$type, DeleteClusterRequest); + +const baseDeleteClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterMetadata", + clusterId: "", +}; + +export const DeleteClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteClusterMetadata" as const, + + encode( + message: DeleteClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteClusterMetadata } as DeleteClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteClusterMetadata { + const message = { ...baseDeleteClusterMetadata } as DeleteClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: DeleteClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteClusterMetadata { + const message = { ...baseDeleteClusterMetadata } as DeleteClusterMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteClusterMetadata.$type, DeleteClusterMetadata); + +const baseListClusterLogsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsRequest", + clusterId: "", + columnFilter: "", + pageSize: 0, + pageToken: "", + alwaysNextPageToken: false, + filter: "", + serviceType: 0, +}; + +export const ListClusterLogsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsRequest" as const, + + encode( + message: ListClusterLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.columnFilter) { + writer.uint32(18).string(v!); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.pageSize !== 0) { + writer.uint32(40).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(50).string(message.pageToken); + } + if (message.alwaysNextPageToken === true) { + writer.uint32(56).bool(message.alwaysNextPageToken); + } + if (message.filter !== "") { + writer.uint32(66).string(message.filter); + } + if (message.serviceType !== 0) { + writer.uint32(72).int32(message.serviceType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsRequest { + const reader = 
input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.columnFilter = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.columnFilter.push(reader.string()); + break; + case 3: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 6: + message.pageToken = reader.string(); + break; + case 7: + message.alwaysNextPageToken = reader.bool(); + break; + case 8: + message.filter = reader.string(); + break; + case 9: + message.serviceType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.columnFilter = (object.columnFilter ?? []).map((e: any) => + String(e) + ); + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + message.alwaysNextPageToken = + object.alwaysNextPageToken !== undefined && + object.alwaysNextPageToken !== null + ? Boolean(object.alwaysNextPageToken) + : false; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.serviceType = + object.serviceType !== undefined && object.serviceType !== null + ? listClusterLogsRequest_ServiceTypeFromJSON(object.serviceType) + : 0; + return message; + }, + + toJSON(message: ListClusterLogsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.columnFilter) { + obj.columnFilter = message.columnFilter.map((e) => e); + } else { + obj.columnFilter = []; + } + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.alwaysNextPageToken !== undefined && + (obj.alwaysNextPageToken = message.alwaysNextPageToken); + message.filter !== undefined && (obj.filter = message.filter); + message.serviceType !== undefined && + (obj.serviceType = listClusterLogsRequest_ServiceTypeToJSON( + message.serviceType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = object.clusterId ?? ""; + message.columnFilter = object.columnFilter?.map((e) => e) || []; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.alwaysNextPageToken = object.alwaysNextPageToken ?? false; + message.filter = object.filter ?? 
""; + message.serviceType = object.serviceType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsRequest.$type, ListClusterLogsRequest); + +const baseLogRecord: object = { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord", +}; + +export const LogRecord = { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord" as const, + + encode( + message: LogRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== undefined) { + Timestamp.encode( + toTimestamp(message.timestamp), + writer.uint32(10).fork() + ).ldelim(); + } + Object.entries(message.message).forEach(([key, value]) => { + LogRecord_MessageEntry.encode( + { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord.MessageEntry", + key: key as any, + value, + }, + writer.uint32(18).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLogRecord } as LogRecord; + message.message = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + const entry2 = LogRecord_MessageEntry.decode(reader, reader.uint32()); + if (entry2.value !== undefined) { + message.message[entry2.key] = entry2.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? fromJsonTimestamp(object.timestamp) + : undefined; + message.message = Object.entries(object.message ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: LogRecord): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = message.timestamp.toISOString()); + obj.message = {}; + if (message.message) { + Object.entries(message.message).forEach(([k, v]) => { + obj.message[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = object.timestamp ?? undefined; + message.message = Object.entries(object.message ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(LogRecord.$type, LogRecord); + +const baseLogRecord_MessageEntry: object = { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord.MessageEntry", + key: "", + value: "", +}; + +export const LogRecord_MessageEntry = { + $type: "yandex.cloud.mdb.opensearch.v1.LogRecord.MessageEntry" as const, + + encode( + message: LogRecord_MessageEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LogRecord_MessageEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: LogRecord_MessageEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LogRecord_MessageEntry.$type, LogRecord_MessageEntry); + +const baseListClusterLogsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsResponse", + nextPageToken: "", +}; + +export const ListClusterLogsResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterLogsResponse" as const, + + encode( + message: ListClusterLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.logs) { + LogRecord.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.logs.push(LogRecord.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = (object.logs ?? []).map((e: any) => LogRecord.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterLogsResponse): unknown { + const obj: any = {}; + if (message.logs) { + obj.logs = message.logs.map((e) => (e ? LogRecord.toJSON(e) : undefined)); + } else { + obj.logs = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = object.logs?.map((e) => LogRecord.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsResponse.$type, ListClusterLogsResponse); + +const baseStreamLogRecord: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StreamLogRecord", + nextRecordToken: "", +}; + +export const StreamLogRecord = { + $type: "yandex.cloud.mdb.opensearch.v1.StreamLogRecord" as const, + + encode( + message: StreamLogRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.record !== undefined) { + LogRecord.encode(message.record, writer.uint32(10).fork()).ldelim(); + } + if (message.nextRecordToken !== "") { + writer.uint32(18).string(message.nextRecordToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamLogRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamLogRecord } as StreamLogRecord; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.record = LogRecord.decode(reader, reader.uint32()); + break; + case 2: + message.nextRecordToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamLogRecord { + const message = { ...baseStreamLogRecord } as StreamLogRecord; + message.record = + object.record !== undefined && object.record !== null + ? LogRecord.fromJSON(object.record) + : undefined; + message.nextRecordToken = + object.nextRecordToken !== undefined && object.nextRecordToken !== null + ? String(object.nextRecordToken) + : ""; + return message; + }, + + toJSON(message: StreamLogRecord): unknown { + const obj: any = {}; + message.record !== undefined && + (obj.record = message.record + ? 
LogRecord.toJSON(message.record) + : undefined); + message.nextRecordToken !== undefined && + (obj.nextRecordToken = message.nextRecordToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamLogRecord { + const message = { ...baseStreamLogRecord } as StreamLogRecord; + message.record = + object.record !== undefined && object.record !== null + ? LogRecord.fromPartial(object.record) + : undefined; + message.nextRecordToken = object.nextRecordToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(StreamLogRecord.$type, StreamLogRecord); + +const baseStreamClusterLogsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StreamClusterLogsRequest", + clusterId: "", + columnFilter: "", + recordToken: "", + filter: "", + serviceType: 0, +}; + +export const StreamClusterLogsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.StreamClusterLogsRequest" as const, + + encode( + message: StreamClusterLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.columnFilter) { + writer.uint32(18).string(v!); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.recordToken !== "") { + writer.uint32(42).string(message.recordToken); + } + if (message.filter !== "") { + writer.uint32(50).string(message.filter); + } + if (message.serviceType !== 0) { + writer.uint32(56).int32(message.serviceType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StreamClusterLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.columnFilter = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.columnFilter.push(reader.string()); + break; + case 3: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.recordToken = reader.string(); + break; + case 6: + message.filter = reader.string(); + break; + case 7: + message.serviceType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamClusterLogsRequest { + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.columnFilter = (object.columnFilter ?? []).map((e: any) => + String(e) + ); + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.recordToken = + object.recordToken !== undefined && object.recordToken !== null + ? String(object.recordToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.serviceType = + object.serviceType !== undefined && object.serviceType !== null + ? 
streamClusterLogsRequest_ServiceTypeFromJSON(object.serviceType) + : 0; + return message; + }, + + toJSON(message: StreamClusterLogsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.columnFilter) { + obj.columnFilter = message.columnFilter.map((e) => e); + } else { + obj.columnFilter = []; + } + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.recordToken !== undefined && + (obj.recordToken = message.recordToken); + message.filter !== undefined && (obj.filter = message.filter); + message.serviceType !== undefined && + (obj.serviceType = streamClusterLogsRequest_ServiceTypeToJSON( + message.serviceType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamClusterLogsRequest { + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.clusterId = object.clusterId ?? ""; + message.columnFilter = object.columnFilter?.map((e) => e) || []; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.recordToken = object.recordToken ?? ""; + message.filter = object.filter ?? ""; + message.serviceType = object.serviceType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + StreamClusterLogsRequest.$type, + StreamClusterLogsRequest +); + +const baseListClusterOperationsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterOperationsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsRequest" as const, + + encode( + message: ListClusterOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterOperationsRequest, + } as ListClusterOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterOperationsRequest { + const message = { + ...baseListClusterOperationsRequest, + } as ListClusterOperationsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterOperationsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterOperationsRequest { + const message = { + ...baseListClusterOperationsRequest, + } as ListClusterOperationsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterOperationsRequest.$type, + ListClusterOperationsRequest +); + +const baseListClusterOperationsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsResponse", + nextPageToken: "", +}; + +export const ListClusterOperationsResponse = { + $type: + "yandex.cloud.mdb.opensearch.v1.ListClusterOperationsResponse" as const, + + encode( + message: ListClusterOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterOperationsResponse, + } as ListClusterOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterOperationsResponse { + const message = { + ...baseListClusterOperationsResponse, + } as ListClusterOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterOperationsResponse { + const message = { + ...baseListClusterOperationsResponse, + } as ListClusterOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterOperationsResponse.$type, + ListClusterOperationsResponse +); + +const baseListClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterHostsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsRequest" as const, + + encode( + message: ListClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterHostsRequest, + } as ListClusterHostsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterHostsRequest { + const message = { + ...baseListClusterHostsRequest, + } as ListClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterHostsRequest { + const message = { + ...baseListClusterHostsRequest, + } as ListClusterHostsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterHostsRequest.$type, ListClusterHostsRequest); + +const baseListClusterHostsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsResponse", + nextPageToken: "", +}; + +export const ListClusterHostsResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterHostsResponse" as const, + + encode( + message: ListClusterHostsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.hosts) { + Host.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterHostsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterHostsResponse, + } as ListClusterHostsResponse; + message.hosts = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hosts.push(Host.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterHostsResponse { + const message = { + ...baseListClusterHostsResponse, + } as ListClusterHostsResponse; + message.hosts = (object.hosts ?? []).map((e: any) => Host.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterHostsResponse): unknown { + const obj: any = {}; + if (message.hosts) { + obj.hosts = message.hosts.map((e) => (e ? Host.toJSON(e) : undefined)); + } else { + obj.hosts = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterHostsResponse { + const message = { + ...baseListClusterHostsResponse, + } as ListClusterHostsResponse; + message.hosts = object.hosts?.map((e) => Host.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterHostsResponse.$type, + ListClusterHostsResponse +); + +const baseMoveClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterRequest", + clusterId: "", + destinationFolderId: "", +}; + +export const MoveClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterRequest" as const, + + encode( + message: MoveClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveClusterRequest } as MoveClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveClusterRequest { + const message = { ...baseMoveClusterRequest } as MoveClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? 
String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveClusterRequest { + const message = { ...baseMoveClusterRequest } as MoveClusterRequest; + message.clusterId = object.clusterId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveClusterRequest.$type, MoveClusterRequest); + +const baseMoveClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterMetadata", + clusterId: "", + sourceFolderId: "", + destinationFolderId: "", +}; + +export const MoveClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.MoveClusterMetadata" as const, + + encode( + message: MoveClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.sourceFolderId !== "") { + writer.uint32(18).string(message.sourceFolderId); + } + if (message.destinationFolderId !== "") { + writer.uint32(26).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMoveClusterMetadata } as MoveClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.sourceFolderId = reader.string(); + break; + case 3: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveClusterMetadata { + const message = { ...baseMoveClusterMetadata } as MoveClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.sourceFolderId = + object.sourceFolderId !== undefined && object.sourceFolderId !== null + ? String(object.sourceFolderId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.sourceFolderId !== undefined && + (obj.sourceFolderId = message.sourceFolderId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveClusterMetadata { + const message = { ...baseMoveClusterMetadata } as MoveClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.sourceFolderId = object.sourceFolderId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(MoveClusterMetadata.$type, MoveClusterMetadata); + +const baseStartClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterRequest", + clusterId: "", +}; + +export const StartClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterRequest" as const, + + encode( + message: StartClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StartClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStartClusterRequest } as StartClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartClusterRequest { + const message = { ...baseStartClusterRequest } as StartClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: StartClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartClusterRequest { + const message = { ...baseStartClusterRequest } as StartClusterRequest; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StartClusterRequest.$type, StartClusterRequest); + +const baseStartClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterMetadata", + clusterId: "", +}; + +export const StartClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.StartClusterMetadata" as const, + + encode( + message: StartClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStartClusterMetadata } as StartClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartClusterMetadata { + const message = { ...baseStartClusterMetadata } as StartClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: StartClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartClusterMetadata { + const message = { ...baseStartClusterMetadata } as StartClusterMetadata; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StartClusterMetadata.$type, StartClusterMetadata); + +const baseStopClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterRequest", + clusterId: "", +}; + +export const StopClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterRequest" as const, + + encode( + message: StopClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StopClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStopClusterRequest } as StopClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StopClusterRequest { + const message = { ...baseStopClusterRequest } as StopClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: StopClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StopClusterRequest { + const message = { ...baseStopClusterRequest } as StopClusterRequest; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StopClusterRequest.$type, StopClusterRequest); + +const baseStopClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterMetadata", + clusterId: "", +}; + +export const StopClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.StopClusterMetadata" as const, + + encode( + message: StopClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StopClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStopClusterMetadata } as StopClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StopClusterMetadata { + const message = { ...baseStopClusterMetadata } as StopClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: StopClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StopClusterMetadata { + const message = { ...baseStopClusterMetadata } as StopClusterMetadata; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StopClusterMetadata.$type, StopClusterMetadata); + +const baseConfigCreateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigCreateSpec", + version: "", + adminPassword: "", +}; + +export const ConfigCreateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigCreateSpec" as const, + + encode( + message: ConfigCreateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.adminPassword !== "") { + writer.uint32(18).string(message.adminPassword); + } + if (message.opensearchSpec !== undefined) { + OpenSearchCreateSpec.encode( + message.opensearchSpec, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.dashboardsSpec !== undefined) { + DashboardsCreateSpec.encode( + message.dashboardsSpec, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ConfigCreateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConfigCreateSpec } as ConfigCreateSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 2: + message.adminPassword = reader.string(); + break; + case 3: + message.opensearchSpec = OpenSearchCreateSpec.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.dashboardsSpec = DashboardsCreateSpec.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.access = Access.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigCreateSpec { + const message = { ...baseConfigCreateSpec } as ConfigCreateSpec; + message.version = + object.version !== undefined && object.version !== null + ? String(object.version) + : ""; + message.adminPassword = + object.adminPassword !== undefined && object.adminPassword !== null + ? String(object.adminPassword) + : ""; + message.opensearchSpec = + object.opensearchSpec !== undefined && object.opensearchSpec !== null + ? OpenSearchCreateSpec.fromJSON(object.opensearchSpec) + : undefined; + message.dashboardsSpec = + object.dashboardsSpec !== undefined && object.dashboardsSpec !== null + ? DashboardsCreateSpec.fromJSON(object.dashboardsSpec) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + return message; + }, + + toJSON(message: ConfigCreateSpec): unknown { + const obj: any = {}; + message.version !== undefined && (obj.version = message.version); + message.adminPassword !== undefined && + (obj.adminPassword = message.adminPassword); + message.opensearchSpec !== undefined && + (obj.opensearchSpec = message.opensearchSpec + ? 
OpenSearchCreateSpec.toJSON(message.opensearchSpec) + : undefined); + message.dashboardsSpec !== undefined && + (obj.dashboardsSpec = message.dashboardsSpec + ? DashboardsCreateSpec.toJSON(message.dashboardsSpec) + : undefined); + message.access !== undefined && + (obj.access = message.access ? Access.toJSON(message.access) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConfigCreateSpec { + const message = { ...baseConfigCreateSpec } as ConfigCreateSpec; + message.version = object.version ?? ""; + message.adminPassword = object.adminPassword ?? ""; + message.opensearchSpec = + object.opensearchSpec !== undefined && object.opensearchSpec !== null + ? OpenSearchCreateSpec.fromPartial(object.opensearchSpec) + : undefined; + message.dashboardsSpec = + object.dashboardsSpec !== undefined && object.dashboardsSpec !== null + ? DashboardsCreateSpec.fromPartial(object.dashboardsSpec) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? 
Access.fromPartial(object.access) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ConfigCreateSpec.$type, ConfigCreateSpec); + +const baseOpenSearchCreateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec", + plugins: "", +}; + +export const OpenSearchCreateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec" as const, + + encode( + message: OpenSearchCreateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.plugins) { + writer.uint32(10).string(v!); + } + for (const v of message.nodeGroups) { + OpenSearchCreateSpec_NodeGroup.encode( + v!, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.opensearchConfig2 !== undefined) { + OpenSearchConfig2.encode( + message.opensearchConfig2, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearchCreateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenSearchCreateSpec } as OpenSearchCreateSpec; + message.plugins = []; + message.nodeGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plugins.push(reader.string()); + break; + case 2: + message.nodeGroups.push( + OpenSearchCreateSpec_NodeGroup.decode(reader, reader.uint32()) + ); + break; + case 3: + message.opensearchConfig2 = OpenSearchConfig2.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchCreateSpec { + const message = { ...baseOpenSearchCreateSpec } as OpenSearchCreateSpec; + message.plugins = (object.plugins ?? []).map((e: any) => String(e)); + message.nodeGroups = (object.nodeGroups ?? 
[]).map((e: any) => + OpenSearchCreateSpec_NodeGroup.fromJSON(e) + ); + message.opensearchConfig2 = + object.opensearchConfig_2 !== undefined && + object.opensearchConfig_2 !== null + ? OpenSearchConfig2.fromJSON(object.opensearchConfig_2) + : undefined; + return message; + }, + + toJSON(message: OpenSearchCreateSpec): unknown { + const obj: any = {}; + if (message.plugins) { + obj.plugins = message.plugins.map((e) => e); + } else { + obj.plugins = []; + } + if (message.nodeGroups) { + obj.nodeGroups = message.nodeGroups.map((e) => + e ? OpenSearchCreateSpec_NodeGroup.toJSON(e) : undefined + ); + } else { + obj.nodeGroups = []; + } + message.opensearchConfig2 !== undefined && + (obj.opensearchConfig_2 = message.opensearchConfig2 + ? OpenSearchConfig2.toJSON(message.opensearchConfig2) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchCreateSpec { + const message = { ...baseOpenSearchCreateSpec } as OpenSearchCreateSpec; + message.plugins = object.plugins?.map((e) => e) || []; + message.nodeGroups = + object.nodeGroups?.map((e) => + OpenSearchCreateSpec_NodeGroup.fromPartial(e) + ) || []; + message.opensearchConfig2 = + object.opensearchConfig2 !== undefined && + object.opensearchConfig2 !== null + ? 
OpenSearchConfig2.fromPartial(object.opensearchConfig2) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(OpenSearchCreateSpec.$type, OpenSearchCreateSpec); + +const baseOpenSearchCreateSpec_NodeGroup: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec.NodeGroup", + name: "", + hostsCount: 0, + zoneIds: "", + subnetIds: "", + assignPublicIp: false, + roles: 0, +}; + +export const OpenSearchCreateSpec_NodeGroup = { + $type: + "yandex.cloud.mdb.opensearch.v1.OpenSearchCreateSpec.NodeGroup" as const, + + encode( + message: OpenSearchCreateSpec_NodeGroup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(24).int64(message.hostsCount); + } + for (const v of message.zoneIds) { + writer.uint32(34).string(v!); + } + for (const v of message.subnetIds) { + writer.uint32(42).string(v!); + } + if (message.assignPublicIp === true) { + writer.uint32(48).bool(message.assignPublicIp); + } + writer.uint32(58).fork(); + for (const v of message.roles) { + writer.int32(v); + } + writer.ldelim(); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearchCreateSpec_NodeGroup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseOpenSearchCreateSpec_NodeGroup, + } as OpenSearchCreateSpec_NodeGroup; + message.zoneIds = []; + message.subnetIds = []; + message.roles = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 3: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + case 4: + message.zoneIds.push(reader.string()); + break; + case 5: + message.subnetIds.push(reader.string()); + break; + case 6: + message.assignPublicIp = reader.bool(); + break; + case 7: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.roles.push(reader.int32() as any); + } + } else { + message.roles.push(reader.int32() as any); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchCreateSpec_NodeGroup { + const message = { + ...baseOpenSearchCreateSpec_NodeGroup, + } as OpenSearchCreateSpec_NodeGroup; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + message.roles = (object.roles ?? 
[]).map((e: any) => + openSearch_GroupRoleFromJSON(e) + ); + return message; + }, + + toJSON(message: OpenSearchCreateSpec_NodeGroup): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + if (message.roles) { + obj.roles = message.roles.map((e) => openSearch_GroupRoleToJSON(e)); + } else { + obj.roles = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchCreateSpec_NodeGroup { + const message = { + ...baseOpenSearchCreateSpec_NodeGroup, + } as OpenSearchCreateSpec_NodeGroup; + message.name = object.name ?? ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 0; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + message.assignPublicIp = object.assignPublicIp ?? 
false; + message.roles = object.roles?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + OpenSearchCreateSpec_NodeGroup.$type, + OpenSearchCreateSpec_NodeGroup +); + +const baseDashboardsCreateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec", +}; + +export const DashboardsCreateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec" as const, + + encode( + message: DashboardsCreateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.nodeGroups) { + DashboardsCreateSpec_NodeGroup.encode( + v!, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DashboardsCreateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDashboardsCreateSpec } as DashboardsCreateSpec; + message.nodeGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.nodeGroups.push( + DashboardsCreateSpec_NodeGroup.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DashboardsCreateSpec { + const message = { ...baseDashboardsCreateSpec } as DashboardsCreateSpec; + message.nodeGroups = (object.nodeGroups ?? []).map((e: any) => + DashboardsCreateSpec_NodeGroup.fromJSON(e) + ); + return message; + }, + + toJSON(message: DashboardsCreateSpec): unknown { + const obj: any = {}; + if (message.nodeGroups) { + obj.nodeGroups = message.nodeGroups.map((e) => + e ? 
DashboardsCreateSpec_NodeGroup.toJSON(e) : undefined + ); + } else { + obj.nodeGroups = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): DashboardsCreateSpec { + const message = { ...baseDashboardsCreateSpec } as DashboardsCreateSpec; + message.nodeGroups = + object.nodeGroups?.map((e) => + DashboardsCreateSpec_NodeGroup.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(DashboardsCreateSpec.$type, DashboardsCreateSpec); + +const baseDashboardsCreateSpec_NodeGroup: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec.NodeGroup", + name: "", + hostsCount: 0, + zoneIds: "", + subnetIds: "", + assignPublicIp: false, +}; + +export const DashboardsCreateSpec_NodeGroup = { + $type: + "yandex.cloud.mdb.opensearch.v1.DashboardsCreateSpec.NodeGroup" as const, + + encode( + message: DashboardsCreateSpec_NodeGroup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(24).int64(message.hostsCount); + } + for (const v of message.zoneIds) { + writer.uint32(34).string(v!); + } + for (const v of message.subnetIds) { + writer.uint32(42).string(v!); + } + if (message.assignPublicIp === true) { + writer.uint32(48).bool(message.assignPublicIp); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DashboardsCreateSpec_NodeGroup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDashboardsCreateSpec_NodeGroup, + } as DashboardsCreateSpec_NodeGroup; + message.zoneIds = []; + message.subnetIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 3: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + case 4: + message.zoneIds.push(reader.string()); + break; + case 5: + message.subnetIds.push(reader.string()); + break; + case 6: + message.assignPublicIp = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DashboardsCreateSpec_NodeGroup { + const message = { + ...baseDashboardsCreateSpec_NodeGroup, + } as DashboardsCreateSpec_NodeGroup; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + return message; + }, + + toJSON(message: DashboardsCreateSpec_NodeGroup): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.resources !== undefined && + (obj.resources = message.resources + ? 
Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): DashboardsCreateSpec_NodeGroup { + const message = { + ...baseDashboardsCreateSpec_NodeGroup, + } as DashboardsCreateSpec_NodeGroup; + message.name = object.name ?? ""; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 0; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + message.assignPublicIp = object.assignPublicIp ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + DashboardsCreateSpec_NodeGroup.$type, + DashboardsCreateSpec_NodeGroup +); + +const baseConfigUpdateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigUpdateSpec", + version: "", + adminPassword: "", +}; + +export const ConfigUpdateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.ConfigUpdateSpec" as const, + + encode( + message: ConfigUpdateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.adminPassword !== "") { + writer.uint32(18).string(message.adminPassword); + } + if (message.opensearchSpec !== undefined) { + OpenSearchClusterUpdateSpec.encode( + message.opensearchSpec, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.dashboardsSpec !== undefined) { + DashboardsClusterUpdateSpec.encode( + message.dashboardsSpec, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ConfigUpdateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConfigUpdateSpec } as ConfigUpdateSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 2: + message.adminPassword = reader.string(); + break; + case 3: + message.opensearchSpec = OpenSearchClusterUpdateSpec.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.dashboardsSpec = DashboardsClusterUpdateSpec.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.access = Access.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigUpdateSpec { + const message = { ...baseConfigUpdateSpec } as ConfigUpdateSpec; + message.version = + object.version !== undefined && object.version !== null + ? String(object.version) + : ""; + message.adminPassword = + object.adminPassword !== undefined && object.adminPassword !== null + ? String(object.adminPassword) + : ""; + message.opensearchSpec = + object.opensearchSpec !== undefined && object.opensearchSpec !== null + ? OpenSearchClusterUpdateSpec.fromJSON(object.opensearchSpec) + : undefined; + message.dashboardsSpec = + object.dashboardsSpec !== undefined && object.dashboardsSpec !== null + ? DashboardsClusterUpdateSpec.fromJSON(object.dashboardsSpec) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + return message; + }, + + toJSON(message: ConfigUpdateSpec): unknown { + const obj: any = {}; + message.version !== undefined && (obj.version = message.version); + message.adminPassword !== undefined && + (obj.adminPassword = message.adminPassword); + message.opensearchSpec !== undefined && + (obj.opensearchSpec = message.opensearchSpec + ? 
OpenSearchClusterUpdateSpec.toJSON(message.opensearchSpec) + : undefined); + message.dashboardsSpec !== undefined && + (obj.dashboardsSpec = message.dashboardsSpec + ? DashboardsClusterUpdateSpec.toJSON(message.dashboardsSpec) + : undefined); + message.access !== undefined && + (obj.access = message.access ? Access.toJSON(message.access) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConfigUpdateSpec { + const message = { ...baseConfigUpdateSpec } as ConfigUpdateSpec; + message.version = object.version ?? ""; + message.adminPassword = object.adminPassword ?? ""; + message.opensearchSpec = + object.opensearchSpec !== undefined && object.opensearchSpec !== null + ? OpenSearchClusterUpdateSpec.fromPartial(object.opensearchSpec) + : undefined; + message.dashboardsSpec = + object.dashboardsSpec !== undefined && object.dashboardsSpec !== null + ? DashboardsClusterUpdateSpec.fromPartial(object.dashboardsSpec) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromPartial(object.access) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ConfigUpdateSpec.$type, ConfigUpdateSpec); + +const baseOpenSearchClusterUpdateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchClusterUpdateSpec", + plugins: "", +}; + +export const OpenSearchClusterUpdateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchClusterUpdateSpec" as const, + + encode( + message: OpenSearchClusterUpdateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.plugins) { + writer.uint32(10).string(v!); + } + if (message.opensearchConfig2 !== undefined) { + OpenSearchConfig2.encode( + message.opensearchConfig2, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearchClusterUpdateSpec { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseOpenSearchClusterUpdateSpec, + } as OpenSearchClusterUpdateSpec; + message.plugins = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plugins.push(reader.string()); + break; + case 2: + message.opensearchConfig2 = OpenSearchConfig2.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchClusterUpdateSpec { + const message = { + ...baseOpenSearchClusterUpdateSpec, + } as OpenSearchClusterUpdateSpec; + message.plugins = (object.plugins ?? []).map((e: any) => String(e)); + message.opensearchConfig2 = + object.opensearchConfig_2 !== undefined && + object.opensearchConfig_2 !== null + ? OpenSearchConfig2.fromJSON(object.opensearchConfig_2) + : undefined; + return message; + }, + + toJSON(message: OpenSearchClusterUpdateSpec): unknown { + const obj: any = {}; + if (message.plugins) { + obj.plugins = message.plugins.map((e) => e); + } else { + obj.plugins = []; + } + message.opensearchConfig2 !== undefined && + (obj.opensearchConfig_2 = message.opensearchConfig2 + ? OpenSearchConfig2.toJSON(message.opensearchConfig2) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchClusterUpdateSpec { + const message = { + ...baseOpenSearchClusterUpdateSpec, + } as OpenSearchClusterUpdateSpec; + message.plugins = object.plugins?.map((e) => e) || []; + message.opensearchConfig2 = + object.opensearchConfig2 !== undefined && + object.opensearchConfig2 !== null + ? 
OpenSearchConfig2.fromPartial(object.opensearchConfig2) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + OpenSearchClusterUpdateSpec.$type, + OpenSearchClusterUpdateSpec +); + +const baseDashboardsClusterUpdateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsClusterUpdateSpec", +}; + +export const DashboardsClusterUpdateSpec = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsClusterUpdateSpec" as const, + + encode( + _: DashboardsClusterUpdateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DashboardsClusterUpdateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDashboardsClusterUpdateSpec, + } as DashboardsClusterUpdateSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): DashboardsClusterUpdateSpec { + const message = { + ...baseDashboardsClusterUpdateSpec, + } as DashboardsClusterUpdateSpec; + return message; + }, + + toJSON(_: DashboardsClusterUpdateSpec): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): DashboardsClusterUpdateSpec { + const message = { + ...baseDashboardsClusterUpdateSpec, + } as DashboardsClusterUpdateSpec; + return message; + }, +}; + +messageTypeRegistry.set( + DashboardsClusterUpdateSpec.$type, + DashboardsClusterUpdateSpec +); + +const baseBackupClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterRequest", + clusterId: "", +}; + +export const BackupClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterRequest" as const, + + encode( + message: BackupClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if 
(message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterRequest.$type, BackupClusterRequest); + +const baseBackupClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterMetadata", + clusterId: "", +}; + +export const BackupClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.BackupClusterMetadata" as const, + + encode( + message: BackupClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterMetadata.$type, BackupClusterMetadata); + +const baseRestoreClusterRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest", + backupId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, + folderId: "", +}; + +export const RestoreClusterRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest" as const, + + encode( + message: RestoreClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + RestoreClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(40).int32(message.environment); + } + if (message.configSpec !== undefined) { + ConfigCreateSpec.encode( + message.configSpec, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(58).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(66).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(74).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + writer.uint32(80).bool(message.deletionProtection); + } + if (message.folderId !== "") { + writer.uint32(90).string(message.folderId); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(98).fork() + ).ldelim(); + } + return writer; + }, + + decode( 
+ input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.labels = {}; + message.securityGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = RestoreClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.environment = reader.int32() as any; + break; + case 6: + message.configSpec = ConfigCreateSpec.decode(reader, reader.uint32()); + break; + case 7: + message.networkId = reader.string(); + break; + case 8: + message.securityGroupIds.push(reader.string()); + break; + case 9: + message.serviceAccountId = reader.string(); + break; + case 10: + message.deletionProtection = reader.bool(); + break; + case 11: + message.folderId = reader.string(); + break; + case 12: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigCreateSpec.fromJSON(object.configSpec) + : undefined; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: RestoreClusterRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? 
ConfigCreateSpec.toJSON(message.configSpec) + : undefined); + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = object.backupId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigCreateSpec.fromPartial(object.configSpec) + : undefined; + message.networkId = object.networkId ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.folderId = object.folderId ?? ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? 
MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterRequest.$type, RestoreClusterRequest); + +const baseRestoreClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const RestoreClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.opensearch.v1.RestoreClusterRequest.LabelsEntry" as const, + + encode( + message: RestoreClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: RestoreClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + RestoreClusterRequest_LabelsEntry.$type, + RestoreClusterRequest_LabelsEntry +); + +const baseRestoreClusterMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterMetadata", + clusterId: "", + backupId: "", +}; + +export const RestoreClusterMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.RestoreClusterMetadata" as const, + + encode( + message: RestoreClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: RestoreClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); + +const baseRescheduleMaintenanceRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceRequest", + clusterId: "", + rescheduleType: 0, +}; + +export const RescheduleMaintenanceRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceRequest" as const, + + encode( + message: RescheduleMaintenanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.rescheduleType !== 0) { + writer.uint32(16).int32(message.rescheduleType); + } + if (message.delayedUntil !== undefined) { + Timestamp.encode( + toTimestamp(message.delayedUntil), + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RescheduleMaintenanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRescheduleMaintenanceRequest, + } as RescheduleMaintenanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.rescheduleType = reader.int32() as any; + break; + case 3: + message.delayedUntil = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RescheduleMaintenanceRequest { + const message = { + ...baseRescheduleMaintenanceRequest, + } as RescheduleMaintenanceRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.rescheduleType = + object.rescheduleType !== undefined && object.rescheduleType !== null + ? 
rescheduleMaintenanceRequest_RescheduleTypeFromJSON( + object.rescheduleType + ) + : 0; + message.delayedUntil = + object.delayedUntil !== undefined && object.delayedUntil !== null + ? fromJsonTimestamp(object.delayedUntil) + : undefined; + return message; + }, + + toJSON(message: RescheduleMaintenanceRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.rescheduleType !== undefined && + (obj.rescheduleType = rescheduleMaintenanceRequest_RescheduleTypeToJSON( + message.rescheduleType + )); + message.delayedUntil !== undefined && + (obj.delayedUntil = message.delayedUntil.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): RescheduleMaintenanceRequest { + const message = { + ...baseRescheduleMaintenanceRequest, + } as RescheduleMaintenanceRequest; + message.clusterId = object.clusterId ?? ""; + message.rescheduleType = object.rescheduleType ?? 0; + message.delayedUntil = object.delayedUntil ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + RescheduleMaintenanceRequest.$type, + RescheduleMaintenanceRequest +); + +const baseRescheduleMaintenanceMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceMetadata", + clusterId: "", +}; + +export const RescheduleMaintenanceMetadata = { + $type: + "yandex.cloud.mdb.opensearch.v1.RescheduleMaintenanceMetadata" as const, + + encode( + message: RescheduleMaintenanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.delayedUntil !== undefined) { + Timestamp.encode( + toTimestamp(message.delayedUntil), + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RescheduleMaintenanceMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRescheduleMaintenanceMetadata, + } as RescheduleMaintenanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 4: + message.delayedUntil = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RescheduleMaintenanceMetadata { + const message = { + ...baseRescheduleMaintenanceMetadata, + } as RescheduleMaintenanceMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.delayedUntil = + object.delayedUntil !== undefined && object.delayedUntil !== null + ? fromJsonTimestamp(object.delayedUntil) + : undefined; + return message; + }, + + toJSON(message: RescheduleMaintenanceMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.delayedUntil !== undefined && + (obj.delayedUntil = message.delayedUntil.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): RescheduleMaintenanceMetadata { + const message = { + ...baseRescheduleMaintenanceMetadata, + } as RescheduleMaintenanceMetadata; + message.clusterId = object.clusterId ?? ""; + message.delayedUntil = object.delayedUntil ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + RescheduleMaintenanceMetadata.$type, + RescheduleMaintenanceMetadata +); + +const baseListClusterBackupsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterBackupsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsRequest" as const, + + encode( + message: ListClusterBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsRequest.$type, + ListClusterBackupsRequest +); + +const baseListClusterBackupsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsResponse", + nextPageToken: "", +}; + +export const ListClusterBackupsResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListClusterBackupsResponse" as const, + + encode( + message: ListClusterBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsResponse.$type, + ListClusterBackupsResponse +); + +const baseDeleteOpenSearchNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteOpenSearchNodeGroupRequest", + clusterId: "", + name: "", +}; + +export const DeleteOpenSearchNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.DeleteOpenSearchNodeGroupRequest" as const, + + encode( + message: DeleteOpenSearchNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteOpenSearchNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteOpenSearchNodeGroupRequest, + } as DeleteOpenSearchNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteOpenSearchNodeGroupRequest { + const message = { + ...baseDeleteOpenSearchNodeGroupRequest, + } as DeleteOpenSearchNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? 
String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteOpenSearchNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteOpenSearchNodeGroupRequest { + const message = { + ...baseDeleteOpenSearchNodeGroupRequest, + } as DeleteOpenSearchNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteOpenSearchNodeGroupRequest.$type, + DeleteOpenSearchNodeGroupRequest +); + +const baseUpdateOpenSearchNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateOpenSearchNodeGroupRequest", + clusterId: "", + name: "", +}; + +export const UpdateOpenSearchNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.UpdateOpenSearchNodeGroupRequest" as const, + + encode( + message: UpdateOpenSearchNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(26).fork()).ldelim(); + } + if (message.nodeGroupSpec !== undefined) { + OpenSearchNodeGroupUpdateSpec.encode( + message.nodeGroupSpec, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateOpenSearchNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateOpenSearchNodeGroupRequest, + } as UpdateOpenSearchNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 4: + message.nodeGroupSpec = OpenSearchNodeGroupUpdateSpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateOpenSearchNodeGroupRequest { + const message = { + ...baseUpdateOpenSearchNodeGroupRequest, + } as UpdateOpenSearchNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? OpenSearchNodeGroupUpdateSpec.fromJSON(object.nodeGroupSpec) + : undefined; + return message; + }, + + toJSON(message: UpdateOpenSearchNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.nodeGroupSpec !== undefined && + (obj.nodeGroupSpec = message.nodeGroupSpec + ? 
OpenSearchNodeGroupUpdateSpec.toJSON(message.nodeGroupSpec) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateOpenSearchNodeGroupRequest { + const message = { + ...baseUpdateOpenSearchNodeGroupRequest, + } as UpdateOpenSearchNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? OpenSearchNodeGroupUpdateSpec.fromPartial(object.nodeGroupSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateOpenSearchNodeGroupRequest.$type, + UpdateOpenSearchNodeGroupRequest +); + +const baseOpenSearchNodeGroupUpdateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.OpenSearchNodeGroupUpdateSpec", + hostsCount: 0, + roles: 0, +}; + +export const OpenSearchNodeGroupUpdateSpec = { + $type: + "yandex.cloud.mdb.opensearch.v1.OpenSearchNodeGroupUpdateSpec" as const, + + encode( + message: OpenSearchNodeGroupUpdateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(16).int64(message.hostsCount); + } + writer.uint32(26).fork(); + for (const v of message.roles) { + writer.int32(v); + } + writer.ldelim(); + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearchNodeGroupUpdateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseOpenSearchNodeGroupUpdateSpec, + } as OpenSearchNodeGroupUpdateSpec; + message.roles = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 2: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + case 3: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.roles.push(reader.int32() as any); + } + } else { + message.roles.push(reader.int32() as any); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchNodeGroupUpdateSpec { + const message = { + ...baseOpenSearchNodeGroupUpdateSpec, + } as OpenSearchNodeGroupUpdateSpec; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + message.roles = (object.roles ?? []).map((e: any) => + openSearch_GroupRoleFromJSON(e) + ); + return message; + }, + + toJSON(message: OpenSearchNodeGroupUpdateSpec): unknown { + const obj: any = {}; + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + if (message.roles) { + obj.roles = message.roles.map((e) => openSearch_GroupRoleToJSON(e)); + } else { + obj.roles = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchNodeGroupUpdateSpec { + const message = { + ...baseOpenSearchNodeGroupUpdateSpec, + } as OpenSearchNodeGroupUpdateSpec; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 0; + message.roles = object.roles?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + OpenSearchNodeGroupUpdateSpec.$type, + OpenSearchNodeGroupUpdateSpec +); + +const baseAddOpenSearchNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.AddOpenSearchNodeGroupRequest", + clusterId: "", +}; + +export const AddOpenSearchNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.AddOpenSearchNodeGroupRequest" as const, + + encode( + message: AddOpenSearchNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.nodeGroupSpec !== undefined) { + OpenSearchCreateSpec_NodeGroup.encode( + message.nodeGroupSpec, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddOpenSearchNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddOpenSearchNodeGroupRequest, + } as AddOpenSearchNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.nodeGroupSpec = OpenSearchCreateSpec_NodeGroup.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddOpenSearchNodeGroupRequest { + const message = { + ...baseAddOpenSearchNodeGroupRequest, + } as AddOpenSearchNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? OpenSearchCreateSpec_NodeGroup.fromJSON(object.nodeGroupSpec) + : undefined; + return message; + }, + + toJSON(message: AddOpenSearchNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.nodeGroupSpec !== undefined && + (obj.nodeGroupSpec = message.nodeGroupSpec + ? OpenSearchCreateSpec_NodeGroup.toJSON(message.nodeGroupSpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddOpenSearchNodeGroupRequest { + const message = { + ...baseAddOpenSearchNodeGroupRequest, + } as AddOpenSearchNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? OpenSearchCreateSpec_NodeGroup.fromPartial(object.nodeGroupSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + AddOpenSearchNodeGroupRequest.$type, + AddOpenSearchNodeGroupRequest +); + +const baseDeleteDashboardsNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteDashboardsNodeGroupRequest", + clusterId: "", + name: "", +}; + +export const DeleteDashboardsNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.DeleteDashboardsNodeGroupRequest" as const, + + encode( + message: DeleteDashboardsNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteDashboardsNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteDashboardsNodeGroupRequest, + } as DeleteDashboardsNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteDashboardsNodeGroupRequest { + const message = { + ...baseDeleteDashboardsNodeGroupRequest, + } as DeleteDashboardsNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteDashboardsNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): DeleteDashboardsNodeGroupRequest { + const message = { + ...baseDeleteDashboardsNodeGroupRequest, + } as DeleteDashboardsNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteDashboardsNodeGroupRequest.$type, + DeleteDashboardsNodeGroupRequest +); + +const baseUpdateDashboardsNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateDashboardsNodeGroupRequest", + clusterId: "", + name: "", +}; + +export const UpdateDashboardsNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.UpdateDashboardsNodeGroupRequest" as const, + + encode( + message: UpdateDashboardsNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(26).fork()).ldelim(); + } + if (message.nodeGroupSpec !== undefined) { + DashboardsNodeGroupUpdateSpec.encode( + message.nodeGroupSpec, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDashboardsNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateDashboardsNodeGroupRequest, + } as UpdateDashboardsNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 4: + message.nodeGroupSpec = DashboardsNodeGroupUpdateSpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDashboardsNodeGroupRequest { + const message = { + ...baseUpdateDashboardsNodeGroupRequest, + } as UpdateDashboardsNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? DashboardsNodeGroupUpdateSpec.fromJSON(object.nodeGroupSpec) + : undefined; + return message; + }, + + toJSON(message: UpdateDashboardsNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.nodeGroupSpec !== undefined && + (obj.nodeGroupSpec = message.nodeGroupSpec + ? 
DashboardsNodeGroupUpdateSpec.toJSON(message.nodeGroupSpec) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateDashboardsNodeGroupRequest { + const message = { + ...baseUpdateDashboardsNodeGroupRequest, + } as UpdateDashboardsNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? DashboardsNodeGroupUpdateSpec.fromPartial(object.nodeGroupSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateDashboardsNodeGroupRequest.$type, + UpdateDashboardsNodeGroupRequest +); + +const baseDashboardsNodeGroupUpdateSpec: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DashboardsNodeGroupUpdateSpec", + hostsCount: 0, +}; + +export const DashboardsNodeGroupUpdateSpec = { + $type: + "yandex.cloud.mdb.opensearch.v1.DashboardsNodeGroupUpdateSpec" as const, + + encode( + message: DashboardsNodeGroupUpdateSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + } + if (message.hostsCount !== 0) { + writer.uint32(16).int64(message.hostsCount); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DashboardsNodeGroupUpdateSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDashboardsNodeGroupUpdateSpec, + } as DashboardsNodeGroupUpdateSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 2: + message.hostsCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DashboardsNodeGroupUpdateSpec { + const message = { + ...baseDashboardsNodeGroupUpdateSpec, + } as DashboardsNodeGroupUpdateSpec; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + message.hostsCount = + object.hostsCount !== undefined && object.hostsCount !== null + ? Number(object.hostsCount) + : 0; + return message; + }, + + toJSON(message: DashboardsNodeGroupUpdateSpec): unknown { + const obj: any = {}; + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + message.hostsCount !== undefined && + (obj.hostsCount = Math.round(message.hostsCount)); + return obj; + }, + + fromPartial, I>>( + object: I + ): DashboardsNodeGroupUpdateSpec { + const message = { + ...baseDashboardsNodeGroupUpdateSpec, + } as DashboardsNodeGroupUpdateSpec; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.hostsCount = object.hostsCount ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set( + DashboardsNodeGroupUpdateSpec.$type, + DashboardsNodeGroupUpdateSpec +); + +const baseAddDashboardsNodeGroupRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.AddDashboardsNodeGroupRequest", + clusterId: "", +}; + +export const AddDashboardsNodeGroupRequest = { + $type: + "yandex.cloud.mdb.opensearch.v1.AddDashboardsNodeGroupRequest" as const, + + encode( + message: AddDashboardsNodeGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.nodeGroupSpec !== undefined) { + DashboardsCreateSpec_NodeGroup.encode( + message.nodeGroupSpec, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddDashboardsNodeGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddDashboardsNodeGroupRequest, + } as AddDashboardsNodeGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.nodeGroupSpec = DashboardsCreateSpec_NodeGroup.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddDashboardsNodeGroupRequest { + const message = { + ...baseAddDashboardsNodeGroupRequest, + } as AddDashboardsNodeGroupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? 
DashboardsCreateSpec_NodeGroup.fromJSON(object.nodeGroupSpec) + : undefined; + return message; + }, + + toJSON(message: AddDashboardsNodeGroupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.nodeGroupSpec !== undefined && + (obj.nodeGroupSpec = message.nodeGroupSpec + ? DashboardsCreateSpec_NodeGroup.toJSON(message.nodeGroupSpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddDashboardsNodeGroupRequest { + const message = { + ...baseAddDashboardsNodeGroupRequest, + } as AddDashboardsNodeGroupRequest; + message.clusterId = object.clusterId ?? ""; + message.nodeGroupSpec = + object.nodeGroupSpec !== undefined && object.nodeGroupSpec !== null + ? DashboardsCreateSpec_NodeGroup.fromPartial(object.nodeGroupSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + AddDashboardsNodeGroupRequest.$type, + AddDashboardsNodeGroupRequest +); + +const baseAddNodeGroupMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.AddNodeGroupMetadata", + clusterId: "", + name: "", +}; + +export const AddNodeGroupMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.AddNodeGroupMetadata" as const, + + encode( + message: AddNodeGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddNodeGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAddNodeGroupMetadata } as AddNodeGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddNodeGroupMetadata { + const message = { ...baseAddNodeGroupMetadata } as AddNodeGroupMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: AddNodeGroupMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddNodeGroupMetadata { + const message = { ...baseAddNodeGroupMetadata } as AddNodeGroupMetadata; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(AddNodeGroupMetadata.$type, AddNodeGroupMetadata); + +const baseUpdateNodeGroupMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateNodeGroupMetadata", + clusterId: "", + name: "", +}; + +export const UpdateNodeGroupMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateNodeGroupMetadata" as const, + + encode( + message: UpdateNodeGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateNodeGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateNodeGroupMetadata, + } as UpdateNodeGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateNodeGroupMetadata { + const message = { + ...baseUpdateNodeGroupMetadata, + } as UpdateNodeGroupMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? 
String(object.name) + : ""; + return message; + }, + + toJSON(message: UpdateNodeGroupMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateNodeGroupMetadata { + const message = { + ...baseUpdateNodeGroupMetadata, + } as UpdateNodeGroupMetadata; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateNodeGroupMetadata.$type, UpdateNodeGroupMetadata); + +const baseDeleteNodeGroupMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteNodeGroupMetadata", + clusterId: "", + name: "", +}; + +export const DeleteNodeGroupMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.DeleteNodeGroupMetadata" as const, + + encode( + message: DeleteNodeGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteNodeGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteNodeGroupMetadata, + } as DeleteNodeGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteNodeGroupMetadata { + const message = { + ...baseDeleteNodeGroupMetadata, + } as DeleteNodeGroupMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteNodeGroupMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteNodeGroupMetadata { + const message = { + ...baseDeleteNodeGroupMetadata, + } as DeleteNodeGroupMetadata; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteNodeGroupMetadata.$type, DeleteNodeGroupMetadata); + +const baseGetAuthSettingsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.GetAuthSettingsRequest", + clusterId: "", +}; + +export const GetAuthSettingsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.GetAuthSettingsRequest" as const, + + encode( + message: GetAuthSettingsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetAuthSettingsRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetAuthSettingsRequest } as GetAuthSettingsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetAuthSettingsRequest { + const message = { ...baseGetAuthSettingsRequest } as GetAuthSettingsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: GetAuthSettingsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetAuthSettingsRequest { + const message = { ...baseGetAuthSettingsRequest } as GetAuthSettingsRequest; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetAuthSettingsRequest.$type, GetAuthSettingsRequest); + +const baseUpdateAuthSettingsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsRequest", + clusterId: "", +}; + +export const UpdateAuthSettingsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsRequest" as const, + + encode( + message: UpdateAuthSettingsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.settings !== undefined) { + AuthSettings.encode(message.settings, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAuthSettingsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateAuthSettingsRequest, + } as UpdateAuthSettingsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.settings = AuthSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAuthSettingsRequest { + const message = { + ...baseUpdateAuthSettingsRequest, + } as UpdateAuthSettingsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? AuthSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdateAuthSettingsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.settings !== undefined && + (obj.settings = message.settings + ? AuthSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateAuthSettingsRequest { + const message = { + ...baseUpdateAuthSettingsRequest, + } as UpdateAuthSettingsRequest; + message.clusterId = object.clusterId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
AuthSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAuthSettingsRequest.$type, + UpdateAuthSettingsRequest +); + +const baseUpdateAuthSettingsMetadata: object = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsMetadata", + clusterId: "", +}; + +export const UpdateAuthSettingsMetadata = { + $type: "yandex.cloud.mdb.opensearch.v1.UpdateAuthSettingsMetadata" as const, + + encode( + message: UpdateAuthSettingsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateAuthSettingsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateAuthSettingsMetadata, + } as UpdateAuthSettingsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateAuthSettingsMetadata { + const message = { + ...baseUpdateAuthSettingsMetadata, + } as UpdateAuthSettingsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: UpdateAuthSettingsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateAuthSettingsMetadata { + const message = { + ...baseUpdateAuthSettingsMetadata, + } as UpdateAuthSettingsMetadata; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateAuthSettingsMetadata.$type, + UpdateAuthSettingsMetadata +); + +/** A set of methods for managing OpenSearch clusters. */ +export const ClusterServiceService = { + /** + * Returns the specified OpenSearch cluster. + * + * To get the list of all available OpenSearch clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetClusterRequest) => + Buffer.from(GetClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), + responseSerialize: (value: Cluster) => + Buffer.from(Cluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => Cluster.decode(value), + }, + /** Retrieves the list of OpenSearch clusters that belong to the specified folder. */ + list: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClustersRequest) => + Buffer.from(ListClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), + responseSerialize: (value: ListClustersResponse) => + Buffer.from(ListClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), + }, + /** Creates an OpenSearch cluster in the specified folder. 
*/ + create: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateClusterRequest) => + Buffer.from(CreateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified OpenSearch cluster. */ + update: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterRequest) => + Buffer.from(UpdateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified OpenSearch cluster. */ + delete: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteClusterRequest) => + Buffer.from(DeleteClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Creates a backup for the specified OpenSearch cluster. 
*/ + backup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Backup", + requestStream: false, + responseStream: false, + requestSerialize: (value: BackupClusterRequest) => + Buffer.from(BackupClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => BackupClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Creates a new OpenSearch cluster using the specified backup. */ + restore: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Restore", + requestStream: false, + responseStream: false, + requestSerialize: (value: RestoreClusterRequest) => + Buffer.from(RestoreClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RestoreClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Reschedules a planned maintenance operation. */ + rescheduleMaintenance: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/RescheduleMaintenance", + requestStream: false, + responseStream: false, + requestSerialize: (value: RescheduleMaintenanceRequest) => + Buffer.from(RescheduleMaintenanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + RescheduleMaintenanceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns a list of available backups for the specified OpenSearch cluster. 
*/ + listBackups: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/ListBackups", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterBackupsRequest) => + Buffer.from(ListClusterBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterBackupsRequest.decode(value), + responseSerialize: (value: ListClusterBackupsResponse) => + Buffer.from(ListClusterBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterBackupsResponse.decode(value), + }, + /** Moves the specified OpenSearch cluster to the specified folder. */ + move: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveClusterRequest) => + Buffer.from(MoveClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Starts the specified OpenSearch cluster. */ + start: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Start", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartClusterRequest) => + Buffer.from(StartClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StartClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Stops the specified OpenSearch cluster. 
*/ + stop: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/Stop", + requestStream: false, + responseStream: false, + requestSerialize: (value: StopClusterRequest) => + Buffer.from(StopClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StopClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Retrieves logs for the specified OpenSearch cluster. + * For detailed description, see the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developer's guide. + */ + listLogs: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/ListLogs", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterLogsRequest) => + Buffer.from(ListClusterLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClusterLogsRequest.decode(value), + responseSerialize: (value: ListClusterLogsResponse) => + Buffer.from(ListClusterLogsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterLogsResponse.decode(value), + }, + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/StreamLogs", + requestStream: false, + responseStream: true, + requestSerialize: (value: StreamClusterLogsRequest) => + Buffer.from(StreamClusterLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + StreamClusterLogsRequest.decode(value), + responseSerialize: (value: StreamLogRecord) => + Buffer.from(StreamLogRecord.encode(value).finish()), + responseDeserialize: (value: Buffer) => StreamLogRecord.decode(value), + }, + /** Retrieves the list of Operation resources for the specified cluster. 
*/ + listOperations: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterOperationsRequest) => + Buffer.from(ListClusterOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterOperationsRequest.decode(value), + responseSerialize: (value: ListClusterOperationsResponse) => + Buffer.from(ListClusterOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterOperationsResponse.decode(value), + }, + /** Retrieves a list of hosts for the specified cluster. */ + listHosts: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/ListHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterHostsRequest) => + Buffer.from(ListClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterHostsRequest.decode(value), + responseSerialize: (value: ListClusterHostsResponse) => + Buffer.from(ListClusterHostsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterHostsResponse.decode(value), + }, + /** Creates an OpenSearch type host group. */ + addOpenSearchNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/AddOpenSearchNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddOpenSearchNodeGroupRequest) => + Buffer.from(AddOpenSearchNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AddOpenSearchNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes an OpenSearch type host group. 
*/ + deleteOpenSearchNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/DeleteOpenSearchNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteOpenSearchNodeGroupRequest) => + Buffer.from(DeleteOpenSearchNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteOpenSearchNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates an OpenSearch type host group. */ + updateOpenSearchNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/UpdateOpenSearchNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateOpenSearchNodeGroupRequest) => + Buffer.from(UpdateOpenSearchNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateOpenSearchNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Creates a Dashboards type host group. */ + addDashboardsNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/AddDashboardsNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddDashboardsNodeGroupRequest) => + Buffer.from(AddDashboardsNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AddDashboardsNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes a Dashboards type host group. 
*/ + deleteDashboardsNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/DeleteDashboardsNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteDashboardsNodeGroupRequest) => + Buffer.from(DeleteDashboardsNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteDashboardsNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates a Dashboards type host group. */ + updateDashboardsNodeGroup: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/UpdateDashboardsNodeGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateDashboardsNodeGroupRequest) => + Buffer.from(UpdateDashboardsNodeGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateDashboardsNodeGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Retrieves auth settings for specified cluster. */ + getAuthSettings: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/GetAuthSettings", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetAuthSettingsRequest) => + Buffer.from(GetAuthSettingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetAuthSettingsRequest.decode(value), + responseSerialize: (value: AuthSettings) => + Buffer.from(AuthSettings.encode(value).finish()), + responseDeserialize: (value: Buffer) => AuthSettings.decode(value), + }, + /** Updates auth settings for specified cluster. 
*/ + updateAuthSettings: { + path: "/yandex.cloud.mdb.opensearch.v1.ClusterService/UpdateAuthSettings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAuthSettingsRequest) => + Buffer.from(UpdateAuthSettingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAuthSettingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ClusterServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified OpenSearch cluster. + * + * To get the list of all available OpenSearch clusters, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of OpenSearch clusters that belong to the specified folder. */ + list: handleUnaryCall; + /** Creates an OpenSearch cluster in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified OpenSearch cluster. */ + update: handleUnaryCall; + /** Deletes the specified OpenSearch cluster. */ + delete: handleUnaryCall; + /** Creates a backup for the specified OpenSearch cluster. */ + backup: handleUnaryCall; + /** Creates a new OpenSearch cluster using the specified backup. */ + restore: handleUnaryCall; + /** Reschedules a planned maintenance operation. */ + rescheduleMaintenance: handleUnaryCall< + RescheduleMaintenanceRequest, + Operation + >; + /** Returns a list of available backups for the specified OpenSearch cluster. */ + listBackups: handleUnaryCall< + ListClusterBackupsRequest, + ListClusterBackupsResponse + >; + /** Moves the specified OpenSearch cluster to the specified folder. */ + move: handleUnaryCall; + /** Starts the specified OpenSearch cluster. */ + start: handleUnaryCall; + /** Stops the specified OpenSearch cluster. */ + stop: handleUnaryCall; + /** + * Retrieves logs for the specified OpenSearch cluster. 
+ * For detailed description, see the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developer's guide. + */ + listLogs: handleUnaryCall; + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs: handleServerStreamingCall< + StreamClusterLogsRequest, + StreamLogRecord + >; + /** Retrieves the list of Operation resources for the specified cluster. */ + listOperations: handleUnaryCall< + ListClusterOperationsRequest, + ListClusterOperationsResponse + >; + /** Retrieves a list of hosts for the specified cluster. */ + listHosts: handleUnaryCall; + /** Creates an OpenSearch type host group. */ + addOpenSearchNodeGroup: handleUnaryCall< + AddOpenSearchNodeGroupRequest, + Operation + >; + /** Deletes an OpenSearch type host group. */ + deleteOpenSearchNodeGroup: handleUnaryCall< + DeleteOpenSearchNodeGroupRequest, + Operation + >; + /** Updates an OpenSearch type host group. */ + updateOpenSearchNodeGroup: handleUnaryCall< + UpdateOpenSearchNodeGroupRequest, + Operation + >; + /** Creates a Dashboards type host group. */ + addDashboardsNodeGroup: handleUnaryCall< + AddDashboardsNodeGroupRequest, + Operation + >; + /** Deletes a Dashboards type host group. */ + deleteDashboardsNodeGroup: handleUnaryCall< + DeleteDashboardsNodeGroupRequest, + Operation + >; + /** Updates a Dashboards type host group. */ + updateDashboardsNodeGroup: handleUnaryCall< + UpdateDashboardsNodeGroupRequest, + Operation + >; + /** Retrieves auth settings for specified cluster. */ + getAuthSettings: handleUnaryCall; + /** Updates auth settings for specified cluster. */ + updateAuthSettings: handleUnaryCall; +} + +export interface ClusterServiceClient extends Client { + /** + * Returns the specified OpenSearch cluster. + * + * To get the list of all available OpenSearch clusters, make a [List] request. 
+ */ + get( + request: GetClusterRequest, + callback: (error: ServiceError | null, response: Cluster) => void + ): ClientUnaryCall; + get( + request: GetClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Cluster) => void + ): ClientUnaryCall; + get( + request: GetClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Cluster) => void + ): ClientUnaryCall; + /** Retrieves the list of OpenSearch clusters that belong to the specified folder. */ + list( + request: ListClustersRequest, + callback: ( + error: ServiceError | null, + response: ListClustersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListClustersRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClustersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListClustersRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClustersResponse + ) => void + ): ClientUnaryCall; + /** Creates an OpenSearch cluster in the specified folder. */ + create( + request: CreateClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified OpenSearch cluster. 
*/ + update( + request: UpdateClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified OpenSearch cluster. */ + delete( + request: DeleteClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Creates a backup for the specified OpenSearch cluster. */ + backup( + request: BackupClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Creates a new OpenSearch cluster using the specified backup. 
*/ + restore( + request: RestoreClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Reschedules a planned maintenance operation. */ + rescheduleMaintenance( + request: RescheduleMaintenanceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rescheduleMaintenance( + request: RescheduleMaintenanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rescheduleMaintenance( + request: RescheduleMaintenanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns a list of available backups for the specified OpenSearch cluster. */ + listBackups( + request: ListClusterBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + /** Moves the specified OpenSearch cluster to the specified folder. 
*/ + move( + request: MoveClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Starts the specified OpenSearch cluster. */ + start( + request: StartClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + start( + request: StartClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + start( + request: StartClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Stops the specified OpenSearch cluster. */ + stop( + request: StopClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + stop( + request: StopClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + stop( + request: StopClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Retrieves logs for the specified OpenSearch cluster. + * For detailed description, see the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developer's guide. 
+ */ + listLogs( + request: ListClusterLogsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs( + request: StreamClusterLogsRequest, + options?: Partial + ): ClientReadableStream; + streamLogs( + request: StreamClusterLogsRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; + /** Retrieves the list of Operation resources for the specified cluster. */ + listOperations( + request: ListClusterOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListClusterOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListClusterOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterOperationsResponse + ) => void + ): ClientUnaryCall; + /** Retrieves a list of hosts for the specified cluster. 
*/ + listHosts( + request: ListClusterHostsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterHostsResponse + ) => void + ): ClientUnaryCall; + listHosts( + request: ListClusterHostsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterHostsResponse + ) => void + ): ClientUnaryCall; + listHosts( + request: ListClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterHostsResponse + ) => void + ): ClientUnaryCall; + /** Creates an OpenSearch type host group. */ + addOpenSearchNodeGroup( + request: AddOpenSearchNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addOpenSearchNodeGroup( + request: AddOpenSearchNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addOpenSearchNodeGroup( + request: AddOpenSearchNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes an OpenSearch type host group. */ + deleteOpenSearchNodeGroup( + request: DeleteOpenSearchNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteOpenSearchNodeGroup( + request: DeleteOpenSearchNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteOpenSearchNodeGroup( + request: DeleteOpenSearchNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates an OpenSearch type host group. 
*/ + updateOpenSearchNodeGroup( + request: UpdateOpenSearchNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateOpenSearchNodeGroup( + request: UpdateOpenSearchNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateOpenSearchNodeGroup( + request: UpdateOpenSearchNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Creates a Dashboards type host group. */ + addDashboardsNodeGroup( + request: AddDashboardsNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addDashboardsNodeGroup( + request: AddDashboardsNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addDashboardsNodeGroup( + request: AddDashboardsNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes a Dashboards type host group. */ + deleteDashboardsNodeGroup( + request: DeleteDashboardsNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteDashboardsNodeGroup( + request: DeleteDashboardsNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteDashboardsNodeGroup( + request: DeleteDashboardsNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates a Dashboards type host group. 
*/ + updateDashboardsNodeGroup( + request: UpdateDashboardsNodeGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateDashboardsNodeGroup( + request: UpdateDashboardsNodeGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateDashboardsNodeGroup( + request: UpdateDashboardsNodeGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Retrieves auth settings for specified cluster. */ + getAuthSettings( + request: GetAuthSettingsRequest, + callback: (error: ServiceError | null, response: AuthSettings) => void + ): ClientUnaryCall; + getAuthSettings( + request: GetAuthSettingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: AuthSettings) => void + ): ClientUnaryCall; + getAuthSettings( + request: GetAuthSettingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: AuthSettings) => void + ): ClientUnaryCall; + /** Updates auth settings for specified cluster. 
*/ + updateAuthSettings( + request: UpdateAuthSettingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAuthSettings( + request: UpdateAuthSettingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAuthSettings( + request: UpdateAuthSettingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ClusterServiceClient = makeGenericClientConstructor( + ClusterServiceService, + "yandex.cloud.mdb.opensearch.v1.ClusterService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ClusterServiceClient; + service: typeof ClusterServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/config/opensearch.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/config/opensearch.ts new file mode 100644 index 00000000..d6392bf1 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/config/opensearch.ts @@ -0,0 +1,277 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Int64Value } from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1.config"; + +export interface OpenSearchConfig2 { + $type: "yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfig2"; + /** the maximum number of allowed boolean clauses in a query */ + maxClauseCount?: number; + /** the percentage or absolute value (10%, 512mb) of heap space that is allocated to fielddata */ + fielddataCacheSize: string; + reindexRemoteWhitelist: string; +} + +export interface OpenSearchConfigSet2 { + $type: 
"yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfigSet2"; + effectiveConfig?: OpenSearchConfig2; + userConfig?: OpenSearchConfig2; + defaultConfig?: OpenSearchConfig2; +} + +const baseOpenSearchConfig2: object = { + $type: "yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfig2", + fielddataCacheSize: "", + reindexRemoteWhitelist: "", +}; + +export const OpenSearchConfig2 = { + $type: "yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfig2" as const, + + encode( + message: OpenSearchConfig2, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxClauseCount !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxClauseCount! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fielddataCacheSize !== "") { + writer.uint32(34).string(message.fielddataCacheSize); + } + if (message.reindexRemoteWhitelist !== "") { + writer.uint32(50).string(message.reindexRemoteWhitelist); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OpenSearchConfig2 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenSearchConfig2 } as OpenSearchConfig2; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.maxClauseCount = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.fielddataCacheSize = reader.string(); + break; + case 6: + message.reindexRemoteWhitelist = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchConfig2 { + const message = { ...baseOpenSearchConfig2 } as OpenSearchConfig2; + message.maxClauseCount = + object.maxClauseCount !== undefined && object.maxClauseCount !== null + ? 
Number(object.maxClauseCount) + : undefined; + message.fielddataCacheSize = + object.fielddataCacheSize !== undefined && + object.fielddataCacheSize !== null + ? String(object.fielddataCacheSize) + : ""; + message.reindexRemoteWhitelist = + object.reindexRemoteWhitelist !== undefined && + object.reindexRemoteWhitelist !== null + ? String(object.reindexRemoteWhitelist) + : ""; + return message; + }, + + toJSON(message: OpenSearchConfig2): unknown { + const obj: any = {}; + message.maxClauseCount !== undefined && + (obj.maxClauseCount = message.maxClauseCount); + message.fielddataCacheSize !== undefined && + (obj.fielddataCacheSize = message.fielddataCacheSize); + message.reindexRemoteWhitelist !== undefined && + (obj.reindexRemoteWhitelist = message.reindexRemoteWhitelist); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchConfig2 { + const message = { ...baseOpenSearchConfig2 } as OpenSearchConfig2; + message.maxClauseCount = object.maxClauseCount ?? undefined; + message.fielddataCacheSize = object.fielddataCacheSize ?? ""; + message.reindexRemoteWhitelist = object.reindexRemoteWhitelist ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(OpenSearchConfig2.$type, OpenSearchConfig2); + +const baseOpenSearchConfigSet2: object = { + $type: "yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfigSet2", +}; + +export const OpenSearchConfigSet2 = { + $type: "yandex.cloud.mdb.opensearch.v1.config.OpenSearchConfigSet2" as const, + + encode( + message: OpenSearchConfigSet2, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + OpenSearchConfig2.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + OpenSearchConfig2.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + OpenSearchConfig2.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): OpenSearchConfigSet2 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOpenSearchConfigSet2 } as OpenSearchConfigSet2; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = OpenSearchConfig2.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = OpenSearchConfig2.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = OpenSearchConfig2.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OpenSearchConfigSet2 { + const message = { ...baseOpenSearchConfigSet2 } as OpenSearchConfigSet2; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
OpenSearchConfig2.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? OpenSearchConfig2.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? OpenSearchConfig2.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: OpenSearchConfigSet2): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? OpenSearchConfig2.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? OpenSearchConfig2.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? OpenSearchConfig2.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): OpenSearchConfigSet2 { + const message = { ...baseOpenSearchConfigSet2 } as OpenSearchConfigSet2; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? OpenSearchConfig2.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? OpenSearchConfig2.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? OpenSearchConfig2.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(OpenSearchConfigSet2.$type, OpenSearchConfigSet2); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/maintenance.ts new file mode 100644 index 00000000..73734a23 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/maintenance.ts @@ -0,0 +1,561 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +/** An OpenSearch cluster maintenance window. Should be defined by either one of the two options. */ +export interface MaintenanceWindow { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceWindow"; + /** An any-time maintenance window. */ + anytime?: AnytimeMaintenanceWindow | undefined; + /** A weekly maintenance window. */ + weeklyMaintenanceWindow?: WeeklyMaintenanceWindow | undefined; +} + +/** An any-time maintenance window. */ +export interface AnytimeMaintenanceWindow { + $type: "yandex.cloud.mdb.opensearch.v1.AnytimeMaintenanceWindow"; +} + +/** A weekly maintenance window. */ +export interface WeeklyMaintenanceWindow { + $type: "yandex.cloud.mdb.opensearch.v1.WeeklyMaintenanceWindow"; + /** Day of the week. */ + day: WeeklyMaintenanceWindow_WeekDay; + /** Hour of the day in the UTC timezone. 
*/ + hour: number; +} + +export enum WeeklyMaintenanceWindow_WeekDay { + WEEK_DAY_UNSPECIFIED = 0, + /** MON - Monday */ + MON = 1, + /** TUE - Tuesday */ + TUE = 2, + /** WED - Wednesday */ + WED = 3, + /** THU - Thursday */ + THU = 4, + /** FRI - Friday */ + FRI = 5, + /** SAT - Saturday */ + SAT = 6, + /** SUN - Sunday */ + SUN = 7, + UNRECOGNIZED = -1, +} + +export function weeklyMaintenanceWindow_WeekDayFromJSON( + object: any +): WeeklyMaintenanceWindow_WeekDay { + switch (object) { + case 0: + case "WEEK_DAY_UNSPECIFIED": + return WeeklyMaintenanceWindow_WeekDay.WEEK_DAY_UNSPECIFIED; + case 1: + case "MON": + return WeeklyMaintenanceWindow_WeekDay.MON; + case 2: + case "TUE": + return WeeklyMaintenanceWindow_WeekDay.TUE; + case 3: + case "WED": + return WeeklyMaintenanceWindow_WeekDay.WED; + case 4: + case "THU": + return WeeklyMaintenanceWindow_WeekDay.THU; + case 5: + case "FRI": + return WeeklyMaintenanceWindow_WeekDay.FRI; + case 6: + case "SAT": + return WeeklyMaintenanceWindow_WeekDay.SAT; + case 7: + case "SUN": + return WeeklyMaintenanceWindow_WeekDay.SUN; + case -1: + case "UNRECOGNIZED": + default: + return WeeklyMaintenanceWindow_WeekDay.UNRECOGNIZED; + } +} + +export function weeklyMaintenanceWindow_WeekDayToJSON( + object: WeeklyMaintenanceWindow_WeekDay +): string { + switch (object) { + case WeeklyMaintenanceWindow_WeekDay.WEEK_DAY_UNSPECIFIED: + return "WEEK_DAY_UNSPECIFIED"; + case WeeklyMaintenanceWindow_WeekDay.MON: + return "MON"; + case WeeklyMaintenanceWindow_WeekDay.TUE: + return "TUE"; + case WeeklyMaintenanceWindow_WeekDay.WED: + return "WED"; + case WeeklyMaintenanceWindow_WeekDay.THU: + return "THU"; + case WeeklyMaintenanceWindow_WeekDay.FRI: + return "FRI"; + case WeeklyMaintenanceWindow_WeekDay.SAT: + return "SAT"; + case WeeklyMaintenanceWindow_WeekDay.SUN: + return "SUN"; + default: + return "UNKNOWN"; + } +} + +export interface MaintenanceOperation { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceOperation"; + /** The 
description of the operation. */ + info: string; + /** Delay time for the maintenance operation. */ + delayedUntil?: Date; + /** Time of the last maintenance window. */ + latestMaintenanceTime?: Date; + /** Time of the next maintenance window. */ + nextMaintenanceWindowTime?: Date; +} + +const baseMaintenanceWindow: object = { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceWindow", +}; + +export const MaintenanceWindow = { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceWindow" as const, + + encode( + message: MaintenanceWindow, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.anytime !== undefined) { + AnytimeMaintenanceWindow.encode( + message.anytime, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.weeklyMaintenanceWindow !== undefined) { + WeeklyMaintenanceWindow.encode( + message.weeklyMaintenanceWindow, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MaintenanceWindow { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMaintenanceWindow } as MaintenanceWindow; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.anytime = AnytimeMaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.weeklyMaintenanceWindow = WeeklyMaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MaintenanceWindow { + const message = { ...baseMaintenanceWindow } as MaintenanceWindow; + message.anytime = + object.anytime !== undefined && object.anytime !== null + ? 
AnytimeMaintenanceWindow.fromJSON(object.anytime) + : undefined; + message.weeklyMaintenanceWindow = + object.weeklyMaintenanceWindow !== undefined && + object.weeklyMaintenanceWindow !== null + ? WeeklyMaintenanceWindow.fromJSON(object.weeklyMaintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: MaintenanceWindow): unknown { + const obj: any = {}; + message.anytime !== undefined && + (obj.anytime = message.anytime + ? AnytimeMaintenanceWindow.toJSON(message.anytime) + : undefined); + message.weeklyMaintenanceWindow !== undefined && + (obj.weeklyMaintenanceWindow = message.weeklyMaintenanceWindow + ? WeeklyMaintenanceWindow.toJSON(message.weeklyMaintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): MaintenanceWindow { + const message = { ...baseMaintenanceWindow } as MaintenanceWindow; + message.anytime = + object.anytime !== undefined && object.anytime !== null + ? AnytimeMaintenanceWindow.fromPartial(object.anytime) + : undefined; + message.weeklyMaintenanceWindow = + object.weeklyMaintenanceWindow !== undefined && + object.weeklyMaintenanceWindow !== null + ? WeeklyMaintenanceWindow.fromPartial(object.weeklyMaintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MaintenanceWindow.$type, MaintenanceWindow); + +const baseAnytimeMaintenanceWindow: object = { + $type: "yandex.cloud.mdb.opensearch.v1.AnytimeMaintenanceWindow", +}; + +export const AnytimeMaintenanceWindow = { + $type: "yandex.cloud.mdb.opensearch.v1.AnytimeMaintenanceWindow" as const, + + encode( + _: AnytimeMaintenanceWindow, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AnytimeMaintenanceWindow { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAnytimeMaintenanceWindow, + } as AnytimeMaintenanceWindow; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): AnytimeMaintenanceWindow { + const message = { + ...baseAnytimeMaintenanceWindow, + } as AnytimeMaintenanceWindow; + return message; + }, + + toJSON(_: AnytimeMaintenanceWindow): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): AnytimeMaintenanceWindow { + const message = { + ...baseAnytimeMaintenanceWindow, + } as AnytimeMaintenanceWindow; + return message; + }, +}; + +messageTypeRegistry.set( + AnytimeMaintenanceWindow.$type, + AnytimeMaintenanceWindow +); + +const baseWeeklyMaintenanceWindow: object = { + $type: "yandex.cloud.mdb.opensearch.v1.WeeklyMaintenanceWindow", + day: 0, + hour: 0, +}; + +export const WeeklyMaintenanceWindow = { + $type: "yandex.cloud.mdb.opensearch.v1.WeeklyMaintenanceWindow" as const, + + encode( + message: WeeklyMaintenanceWindow, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.day !== 0) { + writer.uint32(8).int32(message.day); + } + if (message.hour !== 0) { + writer.uint32(16).int64(message.hour); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WeeklyMaintenanceWindow { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseWeeklyMaintenanceWindow, + } as WeeklyMaintenanceWindow; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.day = reader.int32() as any; + break; + case 2: + message.hour = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WeeklyMaintenanceWindow { + const message = { + ...baseWeeklyMaintenanceWindow, + } as WeeklyMaintenanceWindow; + message.day = + object.day !== undefined && object.day !== null + ? weeklyMaintenanceWindow_WeekDayFromJSON(object.day) + : 0; + message.hour = + object.hour !== undefined && object.hour !== null + ? Number(object.hour) + : 0; + return message; + }, + + toJSON(message: WeeklyMaintenanceWindow): unknown { + const obj: any = {}; + message.day !== undefined && + (obj.day = weeklyMaintenanceWindow_WeekDayToJSON(message.day)); + message.hour !== undefined && (obj.hour = Math.round(message.hour)); + return obj; + }, + + fromPartial, I>>( + object: I + ): WeeklyMaintenanceWindow { + const message = { + ...baseWeeklyMaintenanceWindow, + } as WeeklyMaintenanceWindow; + message.day = object.day ?? 0; + message.hour = object.hour ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(WeeklyMaintenanceWindow.$type, WeeklyMaintenanceWindow); + +const baseMaintenanceOperation: object = { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceOperation", + info: "", +}; + +export const MaintenanceOperation = { + $type: "yandex.cloud.mdb.opensearch.v1.MaintenanceOperation" as const, + + encode( + message: MaintenanceOperation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.info !== "") { + writer.uint32(10).string(message.info); + } + if (message.delayedUntil !== undefined) { + Timestamp.encode( + toTimestamp(message.delayedUntil), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.latestMaintenanceTime !== undefined) { + Timestamp.encode( + toTimestamp(message.latestMaintenanceTime), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.nextMaintenanceWindowTime !== undefined) { + Timestamp.encode( + toTimestamp(message.nextMaintenanceWindowTime), + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MaintenanceOperation { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMaintenanceOperation } as MaintenanceOperation; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.info = reader.string(); + break; + case 2: + message.delayedUntil = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.latestMaintenanceTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.nextMaintenanceWindowTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MaintenanceOperation { + const message = { ...baseMaintenanceOperation } as MaintenanceOperation; + message.info = + object.info !== undefined && object.info !== null + ? String(object.info) + : ""; + message.delayedUntil = + object.delayedUntil !== undefined && object.delayedUntil !== null + ? fromJsonTimestamp(object.delayedUntil) + : undefined; + message.latestMaintenanceTime = + object.latestMaintenanceTime !== undefined && + object.latestMaintenanceTime !== null + ? fromJsonTimestamp(object.latestMaintenanceTime) + : undefined; + message.nextMaintenanceWindowTime = + object.nextMaintenanceWindowTime !== undefined && + object.nextMaintenanceWindowTime !== null + ? 
fromJsonTimestamp(object.nextMaintenanceWindowTime) + : undefined; + return message; + }, + + toJSON(message: MaintenanceOperation): unknown { + const obj: any = {}; + message.info !== undefined && (obj.info = message.info); + message.delayedUntil !== undefined && + (obj.delayedUntil = message.delayedUntil.toISOString()); + message.latestMaintenanceTime !== undefined && + (obj.latestMaintenanceTime = message.latestMaintenanceTime.toISOString()); + message.nextMaintenanceWindowTime !== undefined && + (obj.nextMaintenanceWindowTime = + message.nextMaintenanceWindowTime.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): MaintenanceOperation { + const message = { ...baseMaintenanceOperation } as MaintenanceOperation; + message.info = object.info ?? ""; + message.delayedUntil = object.delayedUntil ?? undefined; + message.latestMaintenanceTime = object.latestMaintenanceTime ?? undefined; + message.nextMaintenanceWindowTime = + object.nextMaintenanceWindowTime ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(MaintenanceOperation.$type, MaintenanceOperation); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset.ts new file mode 100644 index 00000000..444c8f58 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset.ts @@ -0,0 +1,170 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +/** A preset of resources for hardware configuration of the OpenSearch hosts. */ +export interface ResourcePreset { + $type: "yandex.cloud.mdb.opensearch.v1.ResourcePreset"; + /** ID of the resource preset. */ + id: string; + /** IDs of the availability zones where the resource preset is available. */ + zoneIds: string[]; + /** Number of the CPU cores for an OpenSearch host created with the preset. */ + cores: number; + /** RAM volume for an OpenSearch host created with the preset, in bytes. 
*/ + memory: number; +} + +const baseResourcePreset: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ResourcePreset", + id: "", + zoneIds: "", + cores: 0, + memory: 0, +}; + +export const ResourcePreset = { + $type: "yandex.cloud.mdb.opensearch.v1.ResourcePreset" as const, + + encode( + message: ResourcePreset, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + for (const v of message.zoneIds) { + writer.uint32(18).string(v!); + } + if (message.cores !== 0) { + writer.uint32(24).int64(message.cores); + } + if (message.memory !== 0) { + writer.uint32(32).int64(message.memory); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ResourcePreset { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseResourcePreset } as ResourcePreset; + message.zoneIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.zoneIds.push(reader.string()); + break; + case 3: + message.cores = longToNumber(reader.int64() as Long); + break; + case 4: + message.memory = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ResourcePreset { + const message = { ...baseResourcePreset } as ResourcePreset; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.cores = + object.cores !== undefined && object.cores !== null + ? Number(object.cores) + : 0; + message.memory = + object.memory !== undefined && object.memory !== null + ? 
Number(object.memory) + : 0; + return message; + }, + + toJSON(message: ResourcePreset): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + message.cores !== undefined && (obj.cores = Math.round(message.cores)); + message.memory !== undefined && (obj.memory = Math.round(message.memory)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ResourcePreset { + const message = { ...baseResourcePreset } as ResourcePreset; + message.id = object.id ?? ""; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.cores = object.cores ?? 0; + message.memory = object.memory ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ResourcePreset.$type, ResourcePreset); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset_service.ts new file mode 100644 index 00000000..82ff0a79 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/opensearch/v1/resource_preset_service.ts @@ -0,0 +1,479 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { ResourcePreset } from "../../../../../yandex/cloud/mdb/opensearch/v1/resource_preset"; + +export const protobufPackage = "yandex.cloud.mdb.opensearch.v1"; + +export interface GetResourcePresetRequest { + $type: "yandex.cloud.mdb.opensearch.v1.GetResourcePresetRequest"; + /** + * ID of the resource preset to return. + * + * To get the resource preset ID, use a [ResourcePresetService.List] request. + */ + resourcePresetId: string; +} + +export interface ListResourcePresetsRequest { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsRequest"; + /** + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns + * a [ListResourcePresetsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] + * returned by the previous list request. + */ + pageToken: string; +} + +export interface ListResourcePresetsResponse { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsResponse"; + /** List of resource presets. */ + resourcePresets: ResourcePreset[]; + /** + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetResourcePresetRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.GetResourcePresetRequest", + resourcePresetId: "", +}; + +export const GetResourcePresetRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.GetResourcePresetRequest" as const, + + encode( + message: GetResourcePresetRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetResourcePresetRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + return message; + }, + + toJSON(message: GetResourcePresetRequest): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = object.resourcePresetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetResourcePresetRequest.$type, + GetResourcePresetRequest +); + +const baseListResourcePresetsRequest: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsRequest", + pageSize: 0, + pageToken: "", +}; + +export const ListResourcePresetsRequest = { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsRequest" as const, + + encode( + message: ListResourcePresetsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.pageSize !== 0) { + writer.uint32(8).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(18).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 2: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsRequest): unknown { + const obj: any = {}; + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsRequest.$type, + ListResourcePresetsRequest +); + +const baseListResourcePresetsResponse: object = { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsResponse", + nextPageToken: "", +}; + +export const ListResourcePresetsResponse = { + $type: "yandex.cloud.mdb.opensearch.v1.ListResourcePresetsResponse" as const, + + encode( + message: ListResourcePresetsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.resourcePresets) { + ResourcePreset.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresets.push( + ResourcePreset.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = (object.resourcePresets ?? []).map((e: any) => + ResourcePreset.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? 
String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsResponse): unknown { + const obj: any = {}; + if (message.resourcePresets) { + obj.resourcePresets = message.resourcePresets.map((e) => + e ? ResourcePreset.toJSON(e) : undefined + ); + } else { + obj.resourcePresets = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = + object.resourcePresets?.map((e) => ResourcePreset.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsResponse.$type, + ListResourcePresetsResponse +); + +/** A set of methods for managing resource presets. */ +export const ResourcePresetServiceService = { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.opensearch.v1.ResourcePresetService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetResourcePresetRequest) => + Buffer.from(GetResourcePresetRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetResourcePresetRequest.decode(value), + responseSerialize: (value: ResourcePreset) => + Buffer.from(ResourcePreset.encode(value).finish()), + responseDeserialize: (value: Buffer) => ResourcePreset.decode(value), + }, + /** Retrieves the list of available resource presets. 
*/ + list: { + path: "/yandex.cloud.mdb.opensearch.v1.ResourcePresetService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListResourcePresetsRequest) => + Buffer.from(ListResourcePresetsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListResourcePresetsRequest.decode(value), + responseSerialize: (value: ListResourcePresetsResponse) => + Buffer.from(ListResourcePresetsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListResourcePresetsResponse.decode(value), + }, +} as const; + +export interface ResourcePresetServiceServer + extends UntypedServiceImplementation { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of available resource presets. */ + list: handleUnaryCall< + ListResourcePresetsRequest, + ListResourcePresetsResponse + >; +} + +export interface ResourcePresetServiceClient extends Client { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get( + request: GetResourcePresetRequest, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + /** Retrieves the list of available resource presets. 
*/ + list( + request: ListResourcePresetsRequest, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; +} + +export const ResourcePresetServiceClient = makeGenericClientConstructor( + ResourcePresetServiceService, + "yandex.cloud.mdb.opensearch.v1.ResourcePresetService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ResourcePresetServiceClient; + service: typeof ResourcePresetServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/backup.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/backup.ts index dcf8e2ea..5d777bf5 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/backup.ts @@ -25,12 +25,14 @@ export interface Backup { sourceClusterId: string; /** Time when the backup operation was started. */ startedAt?: Date; - /** Size of backup in bytes */ + /** Size of backup, in bytes */ size: number; /** How this backup was created (manual/automatic/etc...) */ type: Backup_BackupCreationType; /** Method of backup creation */ method: Backup_BackupMethod; + /** Size of the journal associated with backup, in bytes */ + journalSize: number; } export enum Backup_BackupMethod { @@ -125,6 +127,7 @@ const baseBackup: object = { size: 0, type: 0, method: 0, + journalSize: 0, }; export const Backup = { @@ -164,6 +167,9 @@ export const Backup = { if (message.method !== 0) { writer.uint32(64).int32(message.method); } + if (message.journalSize !== 0) { + writer.uint32(72).int64(message.journalSize); + } return writer; }, @@ -202,6 +208,9 @@ export const Backup = { case 8: message.method = reader.int32() as any; break; + case 9: + message.journalSize = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -242,6 +251,10 @@ export const Backup = { object.method !== undefined && object.method !== null ? backup_BackupMethodFromJSON(object.method) : 0; + message.journalSize = + object.journalSize !== undefined && object.journalSize !== null + ? 
Number(object.journalSize) + : 0; return message; }, @@ -260,6 +273,8 @@ export const Backup = { (obj.type = backup_BackupCreationTypeToJSON(message.type)); message.method !== undefined && (obj.method = backup_BackupMethodToJSON(message.method)); + message.journalSize !== undefined && + (obj.journalSize = Math.round(message.journalSize)); return obj; }, @@ -273,6 +288,7 @@ export const Backup = { message.size = object.size ?? 0; message.type = object.type ?? 0; message.method = object.method ?? 0; + message.journalSize = object.journalSize ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts index a1762ccf..fbc9b1e4 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts @@ -15,6 +15,7 @@ import { } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; import { Backup } from "../../../../../yandex/cloud/mdb/postgresql/v1/backup"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -60,6 +61,20 @@ export interface ListBackupsResponse { nextPageToken: string; } +export interface DeleteBackupRequest { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupRequest"; + /** Required. ID of the backup to delete. */ + backupId: string; +} + +export interface DeleteBackupMetadata { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupMetadata"; + /** Required. ID of the PostgreSQL backup that is currently being deleted. */ + backupId: string; + /** ID of the cluster which backup belonged to. 
*/ + clusterId: string; +} + const baseGetBackupRequest: object = { $type: "yandex.cloud.mdb.postgresql.v1.GetBackupRequest", backupId: "", @@ -292,6 +307,146 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); +const baseDeleteBackupRequest: object = { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupRequest", + backupId: "", +}; + +export const DeleteBackupRequest = { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupRequest" as const, + + encode( + message: DeleteBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupRequest { + const message = { ...baseDeleteBackupRequest } as DeleteBackupRequest; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupRequest.$type, DeleteBackupRequest); + +const baseDeleteBackupMetadata: object = { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupMetadata", + backupId: "", + clusterId: "", +}; + +export const DeleteBackupMetadata = { + $type: "yandex.cloud.mdb.postgresql.v1.DeleteBackupMetadata" as const, + + encode( + message: DeleteBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: DeleteBackupMetadata): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBackupMetadata { + const message = { ...baseDeleteBackupMetadata } as DeleteBackupMetadata; + message.backupId = object.backupId ?? ""; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBackupMetadata.$type, DeleteBackupMetadata); + /** A set of methods for managing PostgreSQL Backup resources. */ export const BackupServiceService = { /** @@ -322,6 +477,18 @@ export const BackupServiceService = { Buffer.from(ListBackupsResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), }, + /** Deletes the specified PostgreSQL cluster backup. */ + delete: { + path: "/yandex.cloud.mdb.postgresql.v1.BackupService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBackupRequest) => + Buffer.from(DeleteBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteBackupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface BackupServiceServer extends UntypedServiceImplementation { @@ -333,6 +500,8 @@ export interface BackupServiceServer extends UntypedServiceImplementation { get: handleUnaryCall; /** Retrieves the list of Backup resources available for the specified folder. */ list: handleUnaryCall; + /** Deletes the specified PostgreSQL cluster backup. 
*/ + delete: handleUnaryCall; } export interface BackupServiceClient extends Client { @@ -381,6 +550,22 @@ export interface BackupServiceClient extends Client { response: ListBackupsResponse ) => void ): ClientUnaryCall; + /** Deletes the specified PostgreSQL cluster backup. */ + delete( + request: DeleteBackupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const BackupServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index c79362b7..0c42c5d1 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -1,5 +1,5 @@ /* eslint-disable */ -import {MessageType, messageTypeRegistry} from "../../../../../typeRegistry"; +import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; import { @@ -19,6 +19,10 @@ import { PostgresqlConfigSet13 } from "../../../../../yandex/cloud/mdb/postgresq import { Postgresqlconfigset131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c"; import { PostgresqlConfigSet14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlconfigset141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c"; +import { PostgresqlConfigSet15 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql15"; +import { Postgresqlconfigset151c } from 
"../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c"; +import { PostgresqlConfigSet16 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql16"; +import { Postgresqlconfigset161c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -30,6 +34,10 @@ import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgres import { Postgresqlhostconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13_1c"; import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { Postgresqlhostconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14_1c"; +import { PostgresqlHostConfig15 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host15"; +import { Postgresqlhostconfig151c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host15_1c"; +import { PostgresqlHostConfig16 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host16"; +import { Postgresqlhostconfig161c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host16_1c"; import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -298,6 +306,14 @@ export interface ClusterConfig { postgresqlConfig14?: PostgresqlConfigSet14 | undefined; /** Configuration of a PostgreSQL 14 1C server. */ postgresqlConfig141c?: Postgresqlconfigset141c | undefined; + /** Configuration of a PostgreSQL 15 server. */ + postgresqlConfig15?: PostgresqlConfigSet15 | undefined; + /** Configuration of a PostgreSQL 15 1C server. 
*/ + postgresqlConfig151c?: Postgresqlconfigset151c | undefined; + /** Configuration of a PostgreSQL 16 server. */ + postgresqlConfig16?: PostgresqlConfigSet16 | undefined; + /** Configuration of a PostgreSQL 16 1C server. */ + postgresqlConfig161c?: Postgresqlconfigset161c | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -312,6 +328,8 @@ export interface ClusterConfig { access?: Access; /** Configuration of the performance diagnostics service. */ performanceDiagnostics?: PerformanceDiagnostics; + /** Disk size autoscaling */ + diskSizeAutoscaling?: DiskSizeAutoscaling; } export interface ConnectionPoolerConfig { @@ -512,6 +530,8 @@ export enum Host_Health { DEAD = 2, /** DEGRADED - The host is degraded, and can perform only some of its essential functions. */ DEGRADED = 3, + /** READONLY - The host is alive, but in read-only mode. */ + READONLY = 4, UNRECOGNIZED = -1, } @@ -529,6 +549,9 @@ export function host_HealthFromJSON(object: any): Host_Health { case 3: case "DEGRADED": return Host_Health.DEGRADED; + case 4: + case "READONLY": + return Host_Health.READONLY; case -1: case "UNRECOGNIZED": default: @@ -546,6 +569,8 @@ export function host_HealthToJSON(object: Host_Health): string { return "DEAD"; case Host_Health.DEGRADED: return "DEGRADED"; + case Host_Health.READONLY: + return "READONLY"; default: return "UNKNOWN"; } @@ -575,6 +600,14 @@ export interface HostConfig { postgresqlConfig14?: PostgresqlHostConfig14 | undefined; /** Configuration for a host with PostgreSQL 14 1C server deployed. */ postgresqlConfig141c?: Postgresqlhostconfig141c | undefined; + /** Configuration for a host with PostgreSQL 15 server deployed. */ + postgresqlConfig15?: PostgresqlHostConfig15 | undefined; + /** Configuration for a host with PostgreSQL 15 1C server deployed. 
*/ + postgresqlConfig151c?: Postgresqlhostconfig151c | undefined; + /** Configuration for a host with PostgreSQL 16 server deployed. */ + postgresqlConfig16?: PostgresqlHostConfig16 | undefined; + /** Configuration for a host with PostgreSQL 16 1C server deployed. */ + postgresqlConfig161c?: Postgresqlhostconfig161c | undefined; } export interface Service { @@ -632,6 +665,8 @@ export enum Service_Health { ALIVE = 1, /** DEAD - The server is dead or unresponsive. */ DEAD = 2, + /** READONLY - The server is in read-only mode. */ + READONLY = 3, UNRECOGNIZED = -1, } @@ -646,6 +681,9 @@ export function service_HealthFromJSON(object: any): Service_Health { case 2: case "DEAD": return Service_Health.DEAD; + case 3: + case "READONLY": + return Service_Health.READONLY; case -1: case "UNRECOGNIZED": default: @@ -661,6 +699,8 @@ export function service_HealthToJSON(object: Service_Health): string { return "ALIVE"; case Service_Health.DEAD: return "DEAD"; + case Service_Health.READONLY: + return "READONLY"; default: return "UNKNOWN"; } @@ -699,6 +739,8 @@ export interface Access { serverless: boolean; /** Allow access for DataTransfer. */ dataTransfer: boolean; + /** Allow access for YandexQuery. */ + yandexQuery: boolean; } export interface PerformanceDiagnostics { @@ -711,6 +753,16 @@ export interface PerformanceDiagnostics { statementsSamplingInterval: number; } +export interface DiskSizeAutoscaling { + $type: "yandex.cloud.mdb.postgresql.v1.DiskSizeAutoscaling"; + /** Amount of used storage for automatic disk scaling in the maintenance window, 0 means disabled, in percent. */ + plannedUsageThreshold: number; + /** Amount of used storage for immediately automatic disk scaling, 0 means disabled, in percent. */ + emergencyUsageThreshold: number; + /** Limit on how large the storage for database instances can automatically grow, in bytes. 
*/ + diskSizeLimit: number; +} + const baseCluster: object = { $type: "yandex.cloud.mdb.postgresql.v1.Cluster", id: "", @@ -726,7 +778,7 @@ const baseCluster: object = { hostGroupIds: "", }; -export const Cluster: MessageType = { +export const Cluster = { $type: "yandex.cloud.mdb.postgresql.v1.Cluster" as const, encode( @@ -1222,7 +1274,7 @@ const baseClusterConfig: object = { version: "", }; -export const ClusterConfig: MessageType = { +export const ClusterConfig = { $type: "yandex.cloud.mdb.postgresql.v1.ClusterConfig" as const, encode( @@ -1298,6 +1350,30 @@ export const ClusterConfig: MessageType = { writer.uint32(154).fork() ).ldelim(); } + if (message.postgresqlConfig15 !== undefined) { + PostgresqlConfigSet15.encode( + message.postgresqlConfig15, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.postgresqlConfig151c !== undefined) { + Postgresqlconfigset151c.encode( + message.postgresqlConfig151c, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.postgresqlConfig16 !== undefined) { + PostgresqlConfigSet16.encode( + message.postgresqlConfig16, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.postgresqlConfig161c !== undefined) { + Postgresqlconfigset161c.encode( + message.postgresqlConfig161c, + writer.uint32(202).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -1337,6 +1413,12 @@ export const ClusterConfig: MessageType = { writer.uint32(98).fork() ).ldelim(); } + if (message.diskSizeAutoscaling !== undefined) { + DiskSizeAutoscaling.encode( + message.diskSizeAutoscaling, + writer.uint32(186).fork() + ).ldelim(); + } return writer; }, @@ -1416,6 +1498,30 @@ export const ClusterConfig: MessageType = { reader.uint32() ); break; + case 21: + message.postgresqlConfig15 = PostgresqlConfigSet15.decode( + reader, + reader.uint32() + ); + break; + case 22: + message.postgresqlConfig151c = Postgresqlconfigset151c.decode( + reader, + reader.uint32() + ); + break; + 
case 24: + message.postgresqlConfig16 = PostgresqlConfigSet16.decode( + reader, + reader.uint32() + ); + break; + case 25: + message.postgresqlConfig161c = Postgresqlconfigset161c.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -1449,6 +1555,12 @@ export const ClusterConfig: MessageType = { reader.uint32() ); break; + case 23: + message.diskSizeAutoscaling = DiskSizeAutoscaling.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1518,6 +1630,26 @@ export const ClusterConfig: MessageType = { object.postgresqlConfig_14_1c !== null ? Postgresqlconfigset141c.fromJSON(object.postgresqlConfig_14_1c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig_15 !== undefined && + object.postgresqlConfig_15 !== null + ? PostgresqlConfigSet15.fromJSON(object.postgresqlConfig_15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig_15_1c !== undefined && + object.postgresqlConfig_15_1c !== null + ? Postgresqlconfigset151c.fromJSON(object.postgresqlConfig_15_1c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig_16 !== undefined && + object.postgresqlConfig_16 !== null + ? PostgresqlConfigSet16.fromJSON(object.postgresqlConfig_16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig_16_1c !== undefined && + object.postgresqlConfig_16_1c !== null + ? Postgresqlconfigset161c.fromJSON(object.postgresqlConfig_16_1c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -1549,6 +1681,11 @@ export const ClusterConfig: MessageType = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) : undefined; + message.diskSizeAutoscaling = + object.diskSizeAutoscaling !== undefined && + object.diskSizeAutoscaling !== null + ? 
DiskSizeAutoscaling.fromJSON(object.diskSizeAutoscaling) + : undefined; return message; }, @@ -1599,6 +1736,22 @@ export const ClusterConfig: MessageType = { (obj.postgresqlConfig_14_1c = message.postgresqlConfig141c ? Postgresqlconfigset141c.toJSON(message.postgresqlConfig141c) : undefined); + message.postgresqlConfig15 !== undefined && + (obj.postgresqlConfig_15 = message.postgresqlConfig15 + ? PostgresqlConfigSet15.toJSON(message.postgresqlConfig15) + : undefined); + message.postgresqlConfig151c !== undefined && + (obj.postgresqlConfig_15_1c = message.postgresqlConfig151c + ? Postgresqlconfigset151c.toJSON(message.postgresqlConfig151c) + : undefined); + message.postgresqlConfig16 !== undefined && + (obj.postgresqlConfig_16 = message.postgresqlConfig16 + ? PostgresqlConfigSet16.toJSON(message.postgresqlConfig16) + : undefined); + message.postgresqlConfig161c !== undefined && + (obj.postgresqlConfig_16_1c = message.postgresqlConfig161c + ? Postgresqlconfigset161c.toJSON(message.postgresqlConfig161c) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -1621,6 +1774,10 @@ export const ClusterConfig: MessageType = { (obj.performanceDiagnostics = message.performanceDiagnostics ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) : undefined); + message.diskSizeAutoscaling !== undefined && + (obj.diskSizeAutoscaling = message.diskSizeAutoscaling + ? DiskSizeAutoscaling.toJSON(message.diskSizeAutoscaling) + : undefined); return obj; }, @@ -1684,6 +1841,26 @@ export const ClusterConfig: MessageType = { object.postgresqlConfig141c !== null ? Postgresqlconfigset141c.fromPartial(object.postgresqlConfig141c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig15 !== undefined && + object.postgresqlConfig15 !== null + ? 
PostgresqlConfigSet15.fromPartial(object.postgresqlConfig15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig151c !== undefined && + object.postgresqlConfig151c !== null + ? Postgresqlconfigset151c.fromPartial(object.postgresqlConfig151c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig16 !== undefined && + object.postgresqlConfig16 !== null + ? PostgresqlConfigSet16.fromPartial(object.postgresqlConfig16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig161c !== undefined && + object.postgresqlConfig161c !== null + ? Postgresqlconfigset161c.fromPartial(object.postgresqlConfig161c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -1708,6 +1885,11 @@ export const ClusterConfig: MessageType = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) : undefined; + message.diskSizeAutoscaling = + object.diskSizeAutoscaling !== undefined && + object.diskSizeAutoscaling !== null + ? 
DiskSizeAutoscaling.fromPartial(object.diskSizeAutoscaling) + : undefined; return message; }, }; @@ -2111,6 +2293,30 @@ export const HostConfig = { writer.uint32(90).fork() ).ldelim(); } + if (message.postgresqlConfig15 !== undefined) { + PostgresqlHostConfig15.encode( + message.postgresqlConfig15, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.postgresqlConfig151c !== undefined) { + Postgresqlhostconfig151c.encode( + message.postgresqlConfig151c, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.postgresqlConfig16 !== undefined) { + PostgresqlHostConfig16.encode( + message.postgresqlConfig16, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.postgresqlConfig161c !== undefined) { + Postgresqlhostconfig161c.encode( + message.postgresqlConfig161c, + writer.uint32(122).fork() + ).ldelim(); + } return writer; }, @@ -2187,6 +2393,30 @@ export const HostConfig = { reader.uint32() ); break; + case 12: + message.postgresqlConfig15 = PostgresqlHostConfig15.decode( + reader, + reader.uint32() + ); + break; + case 13: + message.postgresqlConfig151c = Postgresqlhostconfig151c.decode( + reader, + reader.uint32() + ); + break; + case 14: + message.postgresqlConfig16 = PostgresqlHostConfig16.decode( + reader, + reader.uint32() + ); + break; + case 15: + message.postgresqlConfig161c = Postgresqlhostconfig161c.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -2252,6 +2482,26 @@ export const HostConfig = { object.postgresqlHostConfig_14_1c !== null ? Postgresqlhostconfig141c.fromJSON(object.postgresqlHostConfig_14_1c) : undefined; + message.postgresqlConfig15 = + object.postgresqlHostConfig_15 !== undefined && + object.postgresqlHostConfig_15 !== null + ? PostgresqlHostConfig15.fromJSON(object.postgresqlHostConfig_15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlHostConfig_15_1c !== undefined && + object.postgresqlHostConfig_15_1c !== null + ? 
Postgresqlhostconfig151c.fromJSON(object.postgresqlHostConfig_15_1c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlHostConfig_16 !== undefined && + object.postgresqlHostConfig_16 !== null + ? PostgresqlHostConfig16.fromJSON(object.postgresqlHostConfig_16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlHostConfig_16_1c !== undefined && + object.postgresqlHostConfig_16_1c !== null + ? Postgresqlhostconfig161c.fromJSON(object.postgresqlHostConfig_16_1c) + : undefined; return message; }, @@ -2301,6 +2551,22 @@ export const HostConfig = { (obj.postgresqlHostConfig_14_1c = message.postgresqlConfig141c ? Postgresqlhostconfig141c.toJSON(message.postgresqlConfig141c) : undefined); + message.postgresqlConfig15 !== undefined && + (obj.postgresqlHostConfig_15 = message.postgresqlConfig15 + ? PostgresqlHostConfig15.toJSON(message.postgresqlConfig15) + : undefined); + message.postgresqlConfig151c !== undefined && + (obj.postgresqlHostConfig_15_1c = message.postgresqlConfig151c + ? Postgresqlhostconfig151c.toJSON(message.postgresqlConfig151c) + : undefined); + message.postgresqlConfig16 !== undefined && + (obj.postgresqlHostConfig_16 = message.postgresqlConfig16 + ? PostgresqlHostConfig16.toJSON(message.postgresqlConfig16) + : undefined); + message.postgresqlConfig161c !== undefined && + (obj.postgresqlHostConfig_16_1c = message.postgresqlConfig161c + ? Postgresqlhostconfig161c.toJSON(message.postgresqlConfig161c) + : undefined); return obj; }, @@ -2363,6 +2629,26 @@ export const HostConfig = { object.postgresqlConfig141c !== null ? Postgresqlhostconfig141c.fromPartial(object.postgresqlConfig141c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig15 !== undefined && + object.postgresqlConfig15 !== null + ? PostgresqlHostConfig15.fromPartial(object.postgresqlConfig15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig151c !== undefined && + object.postgresqlConfig151c !== null + ? 
Postgresqlhostconfig151c.fromPartial(object.postgresqlConfig151c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig16 !== undefined && + object.postgresqlConfig16 !== null + ? PostgresqlHostConfig16.fromPartial(object.postgresqlConfig16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig161c !== undefined && + object.postgresqlConfig161c !== null + ? Postgresqlhostconfig161c.fromPartial(object.postgresqlConfig161c) + : undefined; return message; }, }; @@ -2539,6 +2825,7 @@ const baseAccess: object = { webSql: false, serverless: false, dataTransfer: false, + yandexQuery: false, }; export const Access = { @@ -2560,6 +2847,9 @@ export const Access = { if (message.dataTransfer === true) { writer.uint32(32).bool(message.dataTransfer); } + if (message.yandexQuery === true) { + writer.uint32(40).bool(message.yandexQuery); + } return writer; }, @@ -2582,6 +2872,9 @@ export const Access = { case 4: message.dataTransfer = reader.bool(); break; + case 5: + message.yandexQuery = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -2608,6 +2901,10 @@ export const Access = { object.dataTransfer !== undefined && object.dataTransfer !== null ? Boolean(object.dataTransfer) : false; + message.yandexQuery = + object.yandexQuery !== undefined && object.yandexQuery !== null + ? Boolean(object.yandexQuery) + : false; return message; }, @@ -2618,6 +2915,8 @@ export const Access = { message.serverless !== undefined && (obj.serverless = message.serverless); message.dataTransfer !== undefined && (obj.dataTransfer = message.dataTransfer); + message.yandexQuery !== undefined && + (obj.yandexQuery = message.yandexQuery); return obj; }, @@ -2627,6 +2926,7 @@ export const Access = { message.webSql = object.webSql ?? false; message.serverless = object.serverless ?? false; message.dataTransfer = object.dataTransfer ?? false; + message.yandexQuery = object.yandexQuery ?? 
false; return message; }, }; @@ -2736,6 +3036,103 @@ export const PerformanceDiagnostics = { messageTypeRegistry.set(PerformanceDiagnostics.$type, PerformanceDiagnostics); +const baseDiskSizeAutoscaling: object = { + $type: "yandex.cloud.mdb.postgresql.v1.DiskSizeAutoscaling", + plannedUsageThreshold: 0, + emergencyUsageThreshold: 0, + diskSizeLimit: 0, +}; + +export const DiskSizeAutoscaling = { + $type: "yandex.cloud.mdb.postgresql.v1.DiskSizeAutoscaling" as const, + + encode( + message: DiskSizeAutoscaling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.plannedUsageThreshold !== 0) { + writer.uint32(8).int64(message.plannedUsageThreshold); + } + if (message.emergencyUsageThreshold !== 0) { + writer.uint32(16).int64(message.emergencyUsageThreshold); + } + if (message.diskSizeLimit !== 0) { + writer.uint32(24).int64(message.diskSizeLimit); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DiskSizeAutoscaling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDiskSizeAutoscaling } as DiskSizeAutoscaling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plannedUsageThreshold = longToNumber(reader.int64() as Long); + break; + case 2: + message.emergencyUsageThreshold = longToNumber( + reader.int64() as Long + ); + break; + case 3: + message.diskSizeLimit = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DiskSizeAutoscaling { + const message = { ...baseDiskSizeAutoscaling } as DiskSizeAutoscaling; + message.plannedUsageThreshold = + object.plannedUsageThreshold !== undefined && + object.plannedUsageThreshold !== null + ? 
Number(object.plannedUsageThreshold) + : 0; + message.emergencyUsageThreshold = + object.emergencyUsageThreshold !== undefined && + object.emergencyUsageThreshold !== null + ? Number(object.emergencyUsageThreshold) + : 0; + message.diskSizeLimit = + object.diskSizeLimit !== undefined && object.diskSizeLimit !== null + ? Number(object.diskSizeLimit) + : 0; + return message; + }, + + toJSON(message: DiskSizeAutoscaling): unknown { + const obj: any = {}; + message.plannedUsageThreshold !== undefined && + (obj.plannedUsageThreshold = Math.round(message.plannedUsageThreshold)); + message.emergencyUsageThreshold !== undefined && + (obj.emergencyUsageThreshold = Math.round( + message.emergencyUsageThreshold + )); + message.diskSizeLimit !== undefined && + (obj.diskSizeLimit = Math.round(message.diskSizeLimit)); + return obj; + }, + + fromPartial, I>>( + object: I + ): DiskSizeAutoscaling { + const message = { ...baseDiskSizeAutoscaling } as DiskSizeAutoscaling; + message.plannedUsageThreshold = object.plannedUsageThreshold ?? 0; + message.emergencyUsageThreshold = object.emergencyUsageThreshold ?? 0; + message.diskSizeLimit = object.diskSizeLimit ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(DiskSizeAutoscaling.$type, DiskSizeAutoscaling); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index f892fa7b..22ee5df5 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -1,5 +1,5 @@ /* eslint-disable */ -import {MessageType, messageTypeRegistry} from "../../../../../typeRegistry"; +import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import { makeGenericClientConstructor, @@ -22,6 +22,7 @@ import { Resources, Access, PerformanceDiagnostics, + DiskSizeAutoscaling, Cluster, Host, cluster_EnvironmentFromJSON, @@ -46,6 +47,10 @@ import { PostgresqlConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v import { Postgresqlconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c"; import { PostgresqlConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c"; +import { PostgresqlConfig15 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql15"; +import { Postgresqlconfig151c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c"; +import { PostgresqlConfig16 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql16"; +import { Postgresqlconfig161c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { 
PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -57,6 +62,10 @@ import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgres import { Postgresqlhostconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13_1c"; import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { Postgresqlhostconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14_1c"; +import { PostgresqlHostConfig15 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host15"; +import { Postgresqlhostconfig151c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host15_1c"; +import { PostgresqlHostConfig16 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host16"; +import { Postgresqlhostconfig161c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host16_1c"; import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -269,6 +278,8 @@ export interface BackupClusterMetadata { $type: "yandex.cloud.mdb.postgresql.v1.BackupClusterMetadata"; /** ID of the PostgreSQL cluster that is being backed up. */ clusterId: string; + /** ID of the PostgreSQL backup that is being created. */ + backupId: string; } export interface RestoreClusterRequest { @@ -836,7 +847,7 @@ export interface ConfigSpec { $type: "yandex.cloud.mdb.postgresql.v1.ConfigSpec"; /** * Version of PostgreSQL used in the cluster. - * Possible values: `9.6`, `10`, `10_1c`, `11`, `12`, `13`. + * Possible values: `9.6`, `10`, `10_1c`, `11`, `12`, `13`, `14`, `15` */ version: string; /** Configuration for a PostgreSQL 9.6 cluster. */ @@ -861,6 +872,14 @@ export interface ConfigSpec { postgresqlConfig14?: PostgresqlConfig14 | undefined; /** Configuration for a PostgreSQL 14 1C cluster. 
*/ postgresqlConfig141c?: Postgresqlconfig141c | undefined; + /** Configuration for a PostgreSQL 15 cluster. */ + postgresqlConfig15?: PostgresqlConfig15 | undefined; + /** Configuration for a PostgreSQL 15 1C cluster. */ + postgresqlConfig151c?: Postgresqlconfig151c | undefined; + /** Configuration for a PostgreSQL 16 cluster. */ + postgresqlConfig16?: PostgresqlConfig16 | undefined; + /** Configuration for a PostgreSQL 16 1C cluster. */ + postgresqlConfig161c?: Postgresqlconfig161c | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -875,6 +894,8 @@ export interface ConfigSpec { access?: Access; /** Configuration of the performance diagnostics service. */ performanceDiagnostics?: PerformanceDiagnostics; + /** Disk size autoscaling */ + diskSizeAutoscaling?: DiskSizeAutoscaling; } export interface ConfigHostSpec { @@ -901,6 +922,14 @@ export interface ConfigHostSpec { postgresqlConfig14?: PostgresqlHostConfig14 | undefined; /** Configuration for a host with PostgreSQL 14 1C server deployed. */ postgresqlConfig141c?: Postgresqlhostconfig141c | undefined; + /** Configuration for a host with PostgreSQL 15 server deployed. */ + postgresqlConfig15?: PostgresqlHostConfig15 | undefined; + /** Configuration for a host with PostgreSQL 15 1C server deployed. */ + postgresqlConfig151c?: Postgresqlhostconfig151c | undefined; + /** Configuration for a host with PostgreSQL 16 server deployed. */ + postgresqlConfig16?: PostgresqlHostConfig16 | undefined; + /** Configuration for a host with PostgreSQL 16 1C server deployed. 
*/ + postgresqlConfig161c?: Postgresqlhostconfig161c | undefined; } const baseGetClusterRequest: object = { @@ -1072,7 +1101,7 @@ const baseListClustersResponse: object = { nextPageToken: "", }; -export const ListClustersResponse: MessageType = { +export const ListClustersResponse = { $type: "yandex.cloud.mdb.postgresql.v1.ListClustersResponse" as const, encode( @@ -2584,6 +2613,7 @@ messageTypeRegistry.set(BackupClusterRequest.$type, BackupClusterRequest); const baseBackupClusterMetadata: object = { $type: "yandex.cloud.mdb.postgresql.v1.BackupClusterMetadata", clusterId: "", + backupId: "", }; export const BackupClusterMetadata = { @@ -2596,6 +2626,9 @@ export const BackupClusterMetadata = { if (message.clusterId !== "") { writer.uint32(10).string(message.clusterId); } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } return writer; }, @@ -2612,6 +2645,9 @@ export const BackupClusterMetadata = { case 1: message.clusterId = reader.string(); break; + case 2: + message.backupId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -2626,12 +2662,17 @@ export const BackupClusterMetadata = { object.clusterId !== undefined && object.clusterId !== null ? String(object.clusterId) : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; return message; }, toJSON(message: BackupClusterMetadata): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); return obj; }, @@ -2640,6 +2681,7 @@ export const BackupClusterMetadata = { ): BackupClusterMetadata { const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? 
""; return message; }, }; @@ -5628,6 +5670,30 @@ export const ConfigSpec = { writer.uint32(154).fork() ).ldelim(); } + if (message.postgresqlConfig15 !== undefined) { + PostgresqlConfig15.encode( + message.postgresqlConfig15, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.postgresqlConfig151c !== undefined) { + Postgresqlconfig151c.encode( + message.postgresqlConfig151c, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.postgresqlConfig16 !== undefined) { + PostgresqlConfig16.encode( + message.postgresqlConfig16, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.postgresqlConfig161c !== undefined) { + Postgresqlconfig161c.encode( + message.postgresqlConfig161c, + writer.uint32(202).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -5667,6 +5733,12 @@ export const ConfigSpec = { writer.uint32(98).fork() ).ldelim(); } + if (message.diskSizeAutoscaling !== undefined) { + DiskSizeAutoscaling.encode( + message.diskSizeAutoscaling, + writer.uint32(186).fork() + ).ldelim(); + } return writer; }, @@ -5746,6 +5818,30 @@ export const ConfigSpec = { reader.uint32() ); break; + case 21: + message.postgresqlConfig15 = PostgresqlConfig15.decode( + reader, + reader.uint32() + ); + break; + case 22: + message.postgresqlConfig151c = Postgresqlconfig151c.decode( + reader, + reader.uint32() + ); + break; + case 24: + message.postgresqlConfig16 = PostgresqlConfig16.decode( + reader, + reader.uint32() + ); + break; + case 25: + message.postgresqlConfig161c = Postgresqlconfig161c.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -5779,6 +5875,12 @@ export const ConfigSpec = { reader.uint32() ); break; + case 23: + message.diskSizeAutoscaling = DiskSizeAutoscaling.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -5848,6 +5950,26 @@ export const ConfigSpec = { 
object.postgresqlConfig_14_1c !== null ? Postgresqlconfig141c.fromJSON(object.postgresqlConfig_14_1c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig_15 !== undefined && + object.postgresqlConfig_15 !== null + ? PostgresqlConfig15.fromJSON(object.postgresqlConfig_15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig_15_1c !== undefined && + object.postgresqlConfig_15_1c !== null + ? Postgresqlconfig151c.fromJSON(object.postgresqlConfig_15_1c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig_16 !== undefined && + object.postgresqlConfig_16 !== null + ? PostgresqlConfig16.fromJSON(object.postgresqlConfig_16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig_16_1c !== undefined && + object.postgresqlConfig_16_1c !== null + ? Postgresqlconfig161c.fromJSON(object.postgresqlConfig_16_1c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -5879,6 +6001,11 @@ export const ConfigSpec = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) : undefined; + message.diskSizeAutoscaling = + object.diskSizeAutoscaling !== undefined && + object.diskSizeAutoscaling !== null + ? DiskSizeAutoscaling.fromJSON(object.diskSizeAutoscaling) + : undefined; return message; }, @@ -5929,6 +6056,22 @@ export const ConfigSpec = { (obj.postgresqlConfig_14_1c = message.postgresqlConfig141c ? Postgresqlconfig141c.toJSON(message.postgresqlConfig141c) : undefined); + message.postgresqlConfig15 !== undefined && + (obj.postgresqlConfig_15 = message.postgresqlConfig15 + ? PostgresqlConfig15.toJSON(message.postgresqlConfig15) + : undefined); + message.postgresqlConfig151c !== undefined && + (obj.postgresqlConfig_15_1c = message.postgresqlConfig151c + ? 
Postgresqlconfig151c.toJSON(message.postgresqlConfig151c) + : undefined); + message.postgresqlConfig16 !== undefined && + (obj.postgresqlConfig_16 = message.postgresqlConfig16 + ? PostgresqlConfig16.toJSON(message.postgresqlConfig16) + : undefined); + message.postgresqlConfig161c !== undefined && + (obj.postgresqlConfig_16_1c = message.postgresqlConfig161c + ? Postgresqlconfig161c.toJSON(message.postgresqlConfig161c) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -5951,6 +6094,10 @@ export const ConfigSpec = { (obj.performanceDiagnostics = message.performanceDiagnostics ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) : undefined); + message.diskSizeAutoscaling !== undefined && + (obj.diskSizeAutoscaling = message.diskSizeAutoscaling + ? DiskSizeAutoscaling.toJSON(message.diskSizeAutoscaling) + : undefined); return obj; }, @@ -6014,6 +6161,26 @@ export const ConfigSpec = { object.postgresqlConfig141c !== null ? Postgresqlconfig141c.fromPartial(object.postgresqlConfig141c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig15 !== undefined && + object.postgresqlConfig15 !== null + ? PostgresqlConfig15.fromPartial(object.postgresqlConfig15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig151c !== undefined && + object.postgresqlConfig151c !== null + ? Postgresqlconfig151c.fromPartial(object.postgresqlConfig151c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig16 !== undefined && + object.postgresqlConfig16 !== null + ? PostgresqlConfig16.fromPartial(object.postgresqlConfig16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig161c !== undefined && + object.postgresqlConfig161c !== null + ? Postgresqlconfig161c.fromPartial(object.postgresqlConfig161c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? 
ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -6038,6 +6205,11 @@ export const ConfigSpec = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) : undefined; + message.diskSizeAutoscaling = + object.diskSizeAutoscaling !== undefined && + object.diskSizeAutoscaling !== null + ? DiskSizeAutoscaling.fromPartial(object.diskSizeAutoscaling) + : undefined; return message; }, }; @@ -6121,6 +6293,30 @@ export const ConfigHostSpec = { writer.uint32(90).fork() ).ldelim(); } + if (message.postgresqlConfig15 !== undefined) { + PostgresqlHostConfig15.encode( + message.postgresqlConfig15, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.postgresqlConfig151c !== undefined) { + Postgresqlhostconfig151c.encode( + message.postgresqlConfig151c, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.postgresqlConfig16 !== undefined) { + PostgresqlHostConfig16.encode( + message.postgresqlConfig16, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.postgresqlConfig161c !== undefined) { + Postgresqlhostconfig161c.encode( + message.postgresqlConfig161c, + writer.uint32(122).fork() + ).ldelim(); + } return writer; }, @@ -6197,6 +6393,30 @@ export const ConfigHostSpec = { reader.uint32() ); break; + case 12: + message.postgresqlConfig15 = PostgresqlHostConfig15.decode( + reader, + reader.uint32() + ); + break; + case 13: + message.postgresqlConfig151c = Postgresqlhostconfig151c.decode( + reader, + reader.uint32() + ); + break; + case 14: + message.postgresqlConfig16 = PostgresqlHostConfig16.decode( + reader, + reader.uint32() + ); + break; + case 15: + message.postgresqlConfig161c = Postgresqlhostconfig161c.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -6262,6 +6482,26 @@ export const ConfigHostSpec = { object.postgresqlHostConfig_14_1c !== null ? 
Postgresqlhostconfig141c.fromJSON(object.postgresqlHostConfig_14_1c) : undefined; + message.postgresqlConfig15 = + object.postgresqlHostConfig_15 !== undefined && + object.postgresqlHostConfig_15 !== null + ? PostgresqlHostConfig15.fromJSON(object.postgresqlHostConfig_15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlHostConfig_15_1c !== undefined && + object.postgresqlHostConfig_15_1c !== null + ? Postgresqlhostconfig151c.fromJSON(object.postgresqlHostConfig_15_1c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlHostConfig_16 !== undefined && + object.postgresqlHostConfig_16 !== null + ? PostgresqlHostConfig16.fromJSON(object.postgresqlHostConfig_16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlHostConfig_16_1c !== undefined && + object.postgresqlHostConfig_16_1c !== null + ? Postgresqlhostconfig161c.fromJSON(object.postgresqlHostConfig_16_1c) + : undefined; return message; }, @@ -6311,6 +6551,22 @@ export const ConfigHostSpec = { (obj.postgresqlHostConfig_14_1c = message.postgresqlConfig141c ? Postgresqlhostconfig141c.toJSON(message.postgresqlConfig141c) : undefined); + message.postgresqlConfig15 !== undefined && + (obj.postgresqlHostConfig_15 = message.postgresqlConfig15 + ? PostgresqlHostConfig15.toJSON(message.postgresqlConfig15) + : undefined); + message.postgresqlConfig151c !== undefined && + (obj.postgresqlHostConfig_15_1c = message.postgresqlConfig151c + ? Postgresqlhostconfig151c.toJSON(message.postgresqlConfig151c) + : undefined); + message.postgresqlConfig16 !== undefined && + (obj.postgresqlHostConfig_16 = message.postgresqlConfig16 + ? PostgresqlHostConfig16.toJSON(message.postgresqlConfig16) + : undefined); + message.postgresqlConfig161c !== undefined && + (obj.postgresqlHostConfig_16_1c = message.postgresqlConfig161c + ? 
Postgresqlhostconfig161c.toJSON(message.postgresqlConfig161c) + : undefined); return obj; }, @@ -6373,6 +6629,26 @@ export const ConfigHostSpec = { object.postgresqlConfig141c !== null ? Postgresqlhostconfig141c.fromPartial(object.postgresqlConfig141c) : undefined; + message.postgresqlConfig15 = + object.postgresqlConfig15 !== undefined && + object.postgresqlConfig15 !== null + ? PostgresqlHostConfig15.fromPartial(object.postgresqlConfig15) + : undefined; + message.postgresqlConfig151c = + object.postgresqlConfig151c !== undefined && + object.postgresqlConfig151c !== null + ? Postgresqlhostconfig151c.fromPartial(object.postgresqlConfig151c) + : undefined; + message.postgresqlConfig16 = + object.postgresqlConfig16 !== undefined && + object.postgresqlConfig16 !== null + ? PostgresqlHostConfig16.fromPartial(object.postgresqlConfig16) + : undefined; + message.postgresqlConfig161c = + object.postgresqlConfig161c !== undefined && + object.postgresqlConfig161c !== null + ? Postgresqlhostconfig161c.fromPartial(object.postgresqlConfig161c) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15.ts new file mode 100644 index 00000000..d5d5003f --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15.ts @@ -0,0 +1,2028 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). 
+ */ +export interface PostgresqlHostConfig15 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: PostgresqlHostConfig15_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlHostConfig15_ForceParallelMode; + clientMinMessages: PostgresqlHostConfig15_LogLevel; + logMinMessages: PostgresqlHostConfig15_LogLevel; + logMinErrorStatement: PostgresqlHostConfig15_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlHostConfig15_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlHostConfig15_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlHostConfig15_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlHostConfig15_ByteaOutput; + xmlbinary: PostgresqlHostConfig15_XmlBinary; + xmloption: PostgresqlHostConfig15_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlHostConfig15_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum PostgresqlHostConfig15_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig15_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return 
PostgresqlHostConfig15_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_BackslashQuoteToJSON( + object: PostgresqlHostConfig15_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig15_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig15_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_ByteaOutputToJSON( + object: PostgresqlHostConfig15_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig15_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlHostConfig15_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_ConstraintExclusionFromJSON( + object: any +): PostgresqlHostConfig15_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_ConstraintExclusionToJSON( + object: PostgresqlHostConfig15_ConstraintExclusion +): string { + switch (object) { + case PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlHostConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_ForceParallelModeFromJSON( + object: any +): 
PostgresqlHostConfig15_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_ForceParallelModeToJSON( + object: PostgresqlHostConfig15_ForceParallelMode +): string { + switch (object) { + case PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlHostConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig15_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case 
"LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig15_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_LogLevelFromJSON( + object: any +): PostgresqlHostConfig15_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG2; 
+ case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlHostConfig15_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_LogLevelToJSON( + object: PostgresqlHostConfig15_LogLevel +): string { + switch (object) { + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlHostConfig15_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlHostConfig15_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_LogStatementFromJSON( + object: any +): PostgresqlHostConfig15_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_LogStatementToJSON( + object: PostgresqlHostConfig15_LogStatement +): string { + switch (object) { + case PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlHostConfig15_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function 
postgresqlHostConfig15_TransactionIsolationFromJSON( + object: any +): PostgresqlHostConfig15_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_TransactionIsolationToJSON( + object: PostgresqlHostConfig15_TransactionIsolation +): string { + switch (object) { + case PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlHostConfig15_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, 
+ UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_XmlBinaryFromJSON( + object: any +): PostgresqlHostConfig15_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlHostConfig15_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlHostConfig15_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlHostConfig15_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_XmlBinaryToJSON( + object: PostgresqlHostConfig15_XmlBinary +): string { + switch (object) { + case PostgresqlHostConfig15_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlHostConfig15_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlHostConfig15_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig15_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig15_XmlOptionFromJSON( + object: any +): PostgresqlHostConfig15_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlHostConfig15_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlHostConfig15_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlHostConfig15_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig15_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig15_XmlOptionToJSON( + object: PostgresqlHostConfig15_XmlOption +): string { + switch (object) { + case PostgresqlHostConfig15_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case 
PostgresqlHostConfig15_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlHostConfig15_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlHostConfig15: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const PostgresqlHostConfig15 = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15" as const, + + encode( + message: PostgresqlHostConfig15, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if 
(message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlHostConfig15 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlHostConfig15 } as PostgresqlHostConfig15; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + 
message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlHostConfig15 { + const message = { ...basePostgresqlHostConfig15 } as PostgresqlHostConfig15; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? 
Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlHostConfig15_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlHostConfig15_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlHostConfig15_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlHostConfig15_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlHostConfig15_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? 
Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlHostConfig15_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlHostConfig15_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlHostConfig15_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? 
Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlHostConfig15_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlHostConfig15_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlHostConfig15_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlHostConfig15_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? 
Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? 
Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? 
Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: PostgresqlHostConfig15): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlHostConfig15_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlHostConfig15_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlHostConfig15_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlHostConfig15_LogLevelToJSON( + message.logMinMessages + )); + 
message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlHostConfig15_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlHostConfig15_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlHostConfig15_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlHostConfig15_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlHostConfig15_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlHostConfig15_XmlBinaryToJSON( + 
message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlHostConfig15_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlHostConfig15_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + 
message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlHostConfig15 { + const message = { ...basePostgresqlHostConfig15 } as PostgresqlHostConfig15; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? 
undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? 
undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? ""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlHostConfig15.$type, PostgresqlHostConfig15); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15_1c.ts new file mode 100644 index 00000000..fb9a85aa --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host15_1c.ts @@ -0,0 +1,2037 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlhostconfig151c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15_1C"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. 
*/ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: Postgresqlhostconfig151c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlhostconfig151c_ForceParallelMode; + clientMinMessages: Postgresqlhostconfig151c_LogLevel; + logMinMessages: Postgresqlhostconfig151c_LogLevel; + logMinErrorStatement: Postgresqlhostconfig151c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlhostconfig151c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlhostconfig151c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlhostconfig151c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlhostconfig151c_ByteaOutput; + xmlbinary: Postgresqlhostconfig151c_XmlBinary; + xmloption: Postgresqlhostconfig151c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlhostconfig151c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum Postgresqlhostconfig151c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig151c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return 
Postgresqlhostconfig151c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_BackslashQuoteToJSON( + object: Postgresqlhostconfig151c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig151c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig151c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_ByteaOutputToJSON( + object: Postgresqlhostconfig151c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig151c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + 
+export enum Postgresqlhostconfig151c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_ConstraintExclusionFromJSON( + object: any +): Postgresqlhostconfig151c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_ConstraintExclusionToJSON( + object: Postgresqlhostconfig151c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlhostconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function 
postgresqlhostconfig151c_ForceParallelModeFromJSON( + object: any +): Postgresqlhostconfig151c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_ForceParallelModeToJSON( + object: Postgresqlhostconfig151c_ForceParallelMode +): string { + switch (object) { + case Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlhostconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig151c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return 
Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig151c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_LogLevelFromJSON( + object: any +): Postgresqlhostconfig151c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return 
Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_LogLevelToJSON( + object: Postgresqlhostconfig151c_LogLevel +): string { + switch (object) { + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case 
Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlhostconfig151c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_LogStatementFromJSON( + object: any +): Postgresqlhostconfig151c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_LogStatementToJSON( + object: Postgresqlhostconfig151c_LogStatement +): string { + switch (object) { + case Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlhostconfig151c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + 
TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_TransactionIsolationFromJSON( + object: any +): Postgresqlhostconfig151c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_TransactionIsolationToJSON( + object: Postgresqlhostconfig151c_TransactionIsolation +): string { + switch (object) { + case Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case 
Postgresqlhostconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_XmlBinaryFromJSON( + object: any +): Postgresqlhostconfig151c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlhostconfig151c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlhostconfig151c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlhostconfig151c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig151c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_XmlBinaryToJSON( + object: Postgresqlhostconfig151c_XmlBinary +): string { + switch (object) { + case Postgresqlhostconfig151c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlhostconfig151c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlhostconfig151c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig151c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig151c_XmlOptionFromJSON( + object: any +): Postgresqlhostconfig151c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlhostconfig151c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlhostconfig151c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlhostconfig151c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return 
Postgresqlhostconfig151c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig151c_XmlOptionToJSON( + object: Postgresqlhostconfig151c_XmlOption +): string { + switch (object) { + case Postgresqlhostconfig151c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlhostconfig151c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlhostconfig151c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlhostconfig151c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15_1C", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const Postgresqlhostconfig151c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig15_1C" as const, + + encode( + message: Postgresqlhostconfig151c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! 
}, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement 
!== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlhostconfig151c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePostgresqlhostconfig151c, + } as Postgresqlhostconfig151c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + 
message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlhostconfig151c { + const message = { + ...basePostgresqlhostconfig151c, + } as Postgresqlhostconfig151c; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? 
Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlhostconfig151c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlhostconfig151c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlhostconfig151c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlhostconfig151c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlhostconfig151c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? 
Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlhostconfig151c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlhostconfig151c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlhostconfig151c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? 
Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlhostconfig151c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlhostconfig151c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlhostconfig151c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? 
postgresqlhostconfig151c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? 
Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? 
String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: Postgresqlhostconfig151c): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlhostconfig151c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlhostconfig151c_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlhostconfig151c_LogLevelToJSON( + 
message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlhostconfig151c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlhostconfig151c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlhostconfig151c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlhostconfig151c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlhostconfig151c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + 
(obj.byteaOutput = postgresqlhostconfig151c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlhostconfig151c_XmlBinaryToJSON( + message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlhostconfig151c_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlhostconfig151c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = 
message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlhostconfig151c { + const message = { + ...basePostgresqlhostconfig151c, + } as Postgresqlhostconfig151c; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? 
undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? 
undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? 
""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Postgresqlhostconfig151c.$type, + Postgresqlhostconfig151c +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16.ts new file mode 100644 index 00000000..d6ef4fee --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16.ts @@ -0,0 +1,2082 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlHostConfig16 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. 
*/ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: PostgresqlHostConfig16_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + debugParallelQuery: PostgresqlHostConfig16_DebugParallelQuery; + clientMinMessages: PostgresqlHostConfig16_LogLevel; + logMinMessages: PostgresqlHostConfig16_LogLevel; + logMinErrorStatement: PostgresqlHostConfig16_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlHostConfig16_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlHostConfig16_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlHostConfig16_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlHostConfig16_ByteaOutput; + xmlbinary: PostgresqlHostConfig16_XmlBinary; + xmloption: PostgresqlHostConfig16_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlHostConfig16_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum PostgresqlHostConfig16_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig16_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return 
PostgresqlHostConfig16_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_BackslashQuoteToJSON( + object: PostgresqlHostConfig16_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig16_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig16_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_ByteaOutputToJSON( + object: PostgresqlHostConfig16_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig16_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlHostConfig16_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_ConstraintExclusionFromJSON( + object: any +): PostgresqlHostConfig16_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_ConstraintExclusionToJSON( + object: PostgresqlHostConfig16_ConstraintExclusion +): string { + switch (object) { + case PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlHostConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_DebugParallelQuery { + DEBUG_PARALLEL_QUERY_UNSPECIFIED = 0, + DEBUG_PARALLEL_QUERY_ON = 1, + DEBUG_PARALLEL_QUERY_OFF = 2, + DEBUG_PARALLEL_QUERY_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_DebugParallelQueryFromJSON( + object: any +): 
PostgresqlHostConfig16_DebugParallelQuery { + switch (object) { + case 0: + case "DEBUG_PARALLEL_QUERY_UNSPECIFIED": + return PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED; + case 1: + case "DEBUG_PARALLEL_QUERY_ON": + return PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON; + case 2: + case "DEBUG_PARALLEL_QUERY_OFF": + return PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF; + case 3: + case "DEBUG_PARALLEL_QUERY_REGRESS": + return PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_DebugParallelQuery.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_DebugParallelQueryToJSON( + object: PostgresqlHostConfig16_DebugParallelQuery +): string { + switch (object) { + case PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED: + return "DEBUG_PARALLEL_QUERY_UNSPECIFIED"; + case PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON: + return "DEBUG_PARALLEL_QUERY_ON"; + case PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF: + return "DEBUG_PARALLEL_QUERY_OFF"; + case PostgresqlHostConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS: + return "DEBUG_PARALLEL_QUERY_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_ForceParallelModeFromJSON( + object: any +): PostgresqlHostConfig16_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case 
"FORCE_PARALLEL_MODE_OFF": + return PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_ForceParallelModeToJSON( + object: PostgresqlHostConfig16_ForceParallelMode +): string { + switch (object) { + case PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlHostConfig16_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig16_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_LogErrorVerbosity.UNRECOGNIZED; + } +} + 
+export function postgresqlHostConfig16_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig16_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_INFO = 12, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_LogLevelFromJSON( + object: any +): PostgresqlHostConfig16_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG1; + case 12: + case "LOG_LEVEL_INFO": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_INFO; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return 
PostgresqlHostConfig16_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlHostConfig16_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_LogLevelToJSON( + object: PostgresqlHostConfig16_LogLevel +): string { + switch (object) { + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_INFO: + return "LOG_LEVEL_INFO"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlHostConfig16_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + 
LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_LogStatementFromJSON( + object: any +): PostgresqlHostConfig16_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_LogStatementToJSON( + object: PostgresqlHostConfig16_LogStatement +): string { + switch (object) { + case PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlHostConfig16_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_TransactionIsolationFromJSON( + object: any +): PostgresqlHostConfig16_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + 
return PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_TransactionIsolationToJSON( + object: PostgresqlHostConfig16_TransactionIsolation +): string { + switch (object) { + case PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlHostConfig16_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_XmlBinaryFromJSON( + object: any +): PostgresqlHostConfig16_XmlBinary { + switch (object) { + case 0: + case 
"XML_BINARY_UNSPECIFIED": + return PostgresqlHostConfig16_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlHostConfig16_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlHostConfig16_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_XmlBinaryToJSON( + object: PostgresqlHostConfig16_XmlBinary +): string { + switch (object) { + case PostgresqlHostConfig16_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlHostConfig16_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlHostConfig16_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig16_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig16_XmlOptionFromJSON( + object: any +): PostgresqlHostConfig16_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlHostConfig16_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlHostConfig16_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlHostConfig16_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig16_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig16_XmlOptionToJSON( + object: PostgresqlHostConfig16_XmlOption +): string { + switch (object) { + case PostgresqlHostConfig16_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlHostConfig16_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlHostConfig16_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + 
} +} + +const basePostgresqlHostConfig16: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16", + constraintExclusion: 0, + debugParallelQuery: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const PostgresqlHostConfig16 = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16" as const, + + encode( + message: PostgresqlHostConfig16, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.debugParallelQuery !== 0) { + writer.uint32(104).int32(message.debugParallelQuery); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if 
(message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlHostConfig16 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlHostConfig16 } as PostgresqlHostConfig16; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.debugParallelQuery = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + 
message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlHostConfig16 { + const message = { ...basePostgresqlHostConfig16 } as PostgresqlHostConfig16; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? 
Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlHostConfig16_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.debugParallelQuery = + object.debugParallelQuery !== undefined && + object.debugParallelQuery !== null + ? postgresqlHostConfig16_DebugParallelQueryFromJSON( + object.debugParallelQuery + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlHostConfig16_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlHostConfig16_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlHostConfig16_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? 
Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlHostConfig16_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlHostConfig16_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlHostConfig16_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? 
Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlHostConfig16_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlHostConfig16_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlHostConfig16_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlHostConfig16_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? 
Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? 
Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? 
Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: PostgresqlHostConfig16): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlHostConfig16_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.debugParallelQuery !== undefined && + (obj.debugParallelQuery = postgresqlHostConfig16_DebugParallelQueryToJSON( + message.debugParallelQuery + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlHostConfig16_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlHostConfig16_LogLevelToJSON( + message.logMinMessages + )); + 
message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlHostConfig16_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlHostConfig16_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlHostConfig16_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlHostConfig16_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlHostConfig16_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlHostConfig16_XmlBinaryToJSON( + 
message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlHostConfig16_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlHostConfig16_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + 
message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlHostConfig16 { + const message = { ...basePostgresqlHostConfig16 } as PostgresqlHostConfig16; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? 
undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.debugParallelQuery = object.debugParallelQuery ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? 
undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? ""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlHostConfig16.$type, PostgresqlHostConfig16); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16_1c.ts new file mode 100644 index 00000000..a0ce126d --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host16_1c.ts @@ -0,0 +1,2092 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlhostconfig161c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16_1C"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. 
*/ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: Postgresqlhostconfig161c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + debugParallelQuery: Postgresqlhostconfig161c_DebugParallelQuery; + clientMinMessages: Postgresqlhostconfig161c_LogLevel; + logMinMessages: Postgresqlhostconfig161c_LogLevel; + logMinErrorStatement: Postgresqlhostconfig161c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlhostconfig161c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlhostconfig161c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlhostconfig161c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlhostconfig161c_ByteaOutput; + xmlbinary: Postgresqlhostconfig161c_XmlBinary; + xmloption: Postgresqlhostconfig161c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlhostconfig161c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum Postgresqlhostconfig161c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig161c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return 
Postgresqlhostconfig161c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_BackslashQuoteToJSON( + object: Postgresqlhostconfig161c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig161c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig161c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_ByteaOutputToJSON( + object: Postgresqlhostconfig161c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig161c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + 
+export enum Postgresqlhostconfig161c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_ConstraintExclusionFromJSON( + object: any +): Postgresqlhostconfig161c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_ConstraintExclusionToJSON( + object: Postgresqlhostconfig161c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlhostconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_DebugParallelQuery { + DEBUG_PARALLEL_QUERY_UNSPECIFIED = 0, + DEBUG_PARALLEL_QUERY_ON = 1, + DEBUG_PARALLEL_QUERY_OFF = 2, + DEBUG_PARALLEL_QUERY_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function 
postgresqlhostconfig161c_DebugParallelQueryFromJSON( + object: any +): Postgresqlhostconfig161c_DebugParallelQuery { + switch (object) { + case 0: + case "DEBUG_PARALLEL_QUERY_UNSPECIFIED": + return Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED; + case 1: + case "DEBUG_PARALLEL_QUERY_ON": + return Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON; + case 2: + case "DEBUG_PARALLEL_QUERY_OFF": + return Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF; + case 3: + case "DEBUG_PARALLEL_QUERY_REGRESS": + return Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_DebugParallelQuery.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_DebugParallelQueryToJSON( + object: Postgresqlhostconfig161c_DebugParallelQuery +): string { + switch (object) { + case Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED: + return "DEBUG_PARALLEL_QUERY_UNSPECIFIED"; + case Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON: + return "DEBUG_PARALLEL_QUERY_ON"; + case Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF: + return "DEBUG_PARALLEL_QUERY_OFF"; + case Postgresqlhostconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS: + return "DEBUG_PARALLEL_QUERY_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_ForceParallelModeFromJSON( + object: any +): Postgresqlhostconfig161c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": 
+ return Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_ForceParallelModeToJSON( + object: Postgresqlhostconfig161c_ForceParallelMode +): string { + switch (object) { + case Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlhostconfig161c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig161c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return 
Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig161c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_INFO = 12, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_LogLevelFromJSON( + object: any +): Postgresqlhostconfig161c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG1; + case 12: + 
case "LOG_LEVEL_INFO": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_INFO; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_LogLevelToJSON( + object: Postgresqlhostconfig161c_LogLevel +): string { + switch (object) { + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_INFO: + return "LOG_LEVEL_INFO"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case 
Postgresqlhostconfig161c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_LogStatementFromJSON( + object: any +): Postgresqlhostconfig161c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_LogStatementToJSON( + object: Postgresqlhostconfig161c_LogStatement +): string { + switch (object) { + case Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlhostconfig161c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + 
TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_TransactionIsolationFromJSON( + object: any +): Postgresqlhostconfig161c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_TransactionIsolationToJSON( + object: Postgresqlhostconfig161c_TransactionIsolation +): string { + switch (object) { + case Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlhostconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + 
default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_XmlBinaryFromJSON( + object: any +): Postgresqlhostconfig161c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlhostconfig161c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlhostconfig161c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlhostconfig161c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_XmlBinaryToJSON( + object: Postgresqlhostconfig161c_XmlBinary +): string { + switch (object) { + case Postgresqlhostconfig161c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlhostconfig161c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlhostconfig161c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig161c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig161c_XmlOptionFromJSON( + object: any +): Postgresqlhostconfig161c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlhostconfig161c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlhostconfig161c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlhostconfig161c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig161c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig161c_XmlOptionToJSON( + object: 
Postgresqlhostconfig161c_XmlOption +): string { + switch (object) { + case Postgresqlhostconfig161c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlhostconfig161c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlhostconfig161c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlhostconfig161c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16_1C", + constraintExclusion: 0, + debugParallelQuery: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const Postgresqlhostconfig161c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig16_1C" as const, + + encode( + message: Postgresqlhostconfig161c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.debugParallelQuery !== 0) { + writer.uint32(104).int32(message.debugParallelQuery); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if 
(message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlhostconfig161c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePostgresqlhostconfig161c, + } as Postgresqlhostconfig161c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.debugParallelQuery = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + 
message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlhostconfig161c { + const message = { + ...basePostgresqlhostconfig161c, + } as Postgresqlhostconfig161c; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? 
Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlhostconfig161c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.debugParallelQuery = + object.debugParallelQuery !== undefined && + object.debugParallelQuery !== null + ? postgresqlhostconfig161c_DebugParallelQueryFromJSON( + object.debugParallelQuery + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlhostconfig161c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlhostconfig161c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlhostconfig161c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? 
Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlhostconfig161c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlhostconfig161c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlhostconfig161c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? 
Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlhostconfig161c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlhostconfig161c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlhostconfig161c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? 
postgresqlhostconfig161c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? 
Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? 
String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: Postgresqlhostconfig161c): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlhostconfig161c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.debugParallelQuery !== undefined && + (obj.debugParallelQuery = + postgresqlhostconfig161c_DebugParallelQueryToJSON( + message.debugParallelQuery + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlhostconfig161c_LogLevelToJSON( + 
message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlhostconfig161c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlhostconfig161c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlhostconfig161c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlhostconfig161c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlhostconfig161c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + 
(obj.byteaOutput = postgresqlhostconfig161c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlhostconfig161c_XmlBinaryToJSON( + message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlhostconfig161c_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlhostconfig161c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = 
message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlhostconfig161c { + const message = { + ...basePostgresqlhostconfig161c, + } as Postgresqlhostconfig161c; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? 
undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.debugParallelQuery = object.debugParallelQuery ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? 
undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? 
""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Postgresqlhostconfig161c.$type, + Postgresqlhostconfig161c +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts index 54f12f79..800a7a3e 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts @@ -162,8 +162,21 @@ export interface PostgresqlConfig10 { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. 
If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum PostgresqlConfig10_WalLevel { @@ -858,6 +871,8 @@ export enum PostgresqlConfig10_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -886,6 +901,12 @@ export function postgresqlConfig10_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -911,6 +932,10 @@ export function postgresqlConfig10_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1825,12 +1850,72 @@ export const PostgresqlConfig10 = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2482,9 +2567,51 @@ export const PostgresqlConfig10 = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = 
DoubleValue.decode(reader, reader.uint32()).value; break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3050,10 +3177,43 @@ export const PostgresqlConfig10 = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3324,7 +3484,21 @@ export const PostgresqlConfig10 = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3473,7 +3647,15 @@ export const PostgresqlConfig10 = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts index ec292611..ac44e53b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts @@ -164,8 +164,21 @@ export interface Postgresqlconfig101c { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; } export enum Postgresqlconfig101c_WalLevel { @@ -860,6 +873,8 @@ export enum Postgresqlconfig101c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -888,6 +903,12 @@ export function postgresqlconfig101c_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -913,6 +934,10 @@ export function postgresqlconfig101c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1845,12 +1870,72 @@ export const Postgresqlconfig101c = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2517,9 +2602,51 @@ export const Postgresqlconfig101c = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = 
DoubleValue.decode(reader, reader.uint32()).value; break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3101,10 +3228,43 @@ export const Postgresqlconfig101c = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3380,7 +3540,21 @@ export const Postgresqlconfig101c = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3531,7 +3705,15 @@ export const Postgresqlconfig101c = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts index 5dccefc6..e67d4123 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts @@ -170,8 +170,24 @@ export interface PostgresqlConfig11 { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; } export enum PostgresqlConfig11_BackslashQuote { @@ -626,6 +642,8 @@ export enum PostgresqlConfig11_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -654,6 +672,12 @@ export function postgresqlConfig11_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -679,6 +703,10 @@ export function postgresqlConfig11_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1902,12 +1930,99 @@ export const PostgresqlConfig11 = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2604,9 +2719,69 @@ export const PostgresqlConfig11 = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3211,10 +3386,58 @@ export const PostgresqlConfig11 = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3502,7 +3725,29 @@ export const PostgresqlConfig11 = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3664,7 +3909,21 @@ export const PostgresqlConfig11 = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts index 31d5cc74..06bd2e1d 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts @@ -172,8 +172,24 @@ export interface Postgresqlconfig111c { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum Postgresqlconfig111c_BackslashQuote { @@ -628,6 +644,8 @@ export enum Postgresqlconfig111c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -656,6 +674,12 @@ export function postgresqlconfig111c_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -681,6 +705,10 @@ export function postgresqlconfig111c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1922,12 +1950,99 @@ export const Postgresqlconfig111c = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2639,9 +2754,69 @@ export const Postgresqlconfig111c = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3262,10 +3437,58 @@ export const Postgresqlconfig111c = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3558,7 +3781,29 @@ export const Postgresqlconfig111c = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3722,7 +3967,21 @@ export const Postgresqlconfig111c = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts index 599d27e9..36637883 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts @@ -172,8 +172,24 @@ export interface PostgresqlConfig12 { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum PostgresqlConfig12_BackslashQuote { @@ -676,6 +692,8 @@ export enum PostgresqlConfig12_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -704,6 +722,12 @@ export function postgresqlConfig12_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -729,6 +753,10 @@ export function postgresqlConfig12_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1965,12 +1993,99 @@ export const PostgresqlConfig12 = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2676,9 +2791,69 @@ export const PostgresqlConfig12 = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3292,10 +3467,58 @@ export const PostgresqlConfig12 = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3589,7 +3812,29 @@ export const PostgresqlConfig12 = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3754,7 +3999,21 @@ export const PostgresqlConfig12 = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts index 59eb96fb..13f5124b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts @@ -174,8 +174,24 @@ export interface Postgresqlconfig121c { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum Postgresqlconfig121c_BackslashQuote { @@ -678,6 +694,8 @@ export enum Postgresqlconfig121c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -706,6 +724,12 @@ export function postgresqlconfig121c_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -731,6 +755,10 @@ export function postgresqlconfig121c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -1985,12 +2013,99 @@ export const Postgresqlconfig121c = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2711,9 +2826,69 @@ export const Postgresqlconfig121c = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3343,10 +3518,58 @@ export const Postgresqlconfig121c = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3645,7 +3868,29 @@ export const Postgresqlconfig121c = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -3812,7 +4057,21 @@ export const Postgresqlconfig121c = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts index 680fefbd..ea2e2169 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts @@ -190,8 +190,24 @@ export interface PostgresqlConfig13 { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum PostgresqlConfig13_BackslashQuote { @@ -694,6 +710,8 @@ export enum PostgresqlConfig13_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -722,6 +740,12 @@ export function postgresqlConfig13_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -747,6 +771,10 @@ export function postgresqlConfig13_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -2088,12 +2116,99 @@ export const PostgresqlConfig13 = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2871,9 +2986,69 @@ export const PostgresqlConfig13 = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3546,10 +3721,58 @@ export const PostgresqlConfig13 = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3869,7 +4092,29 @@ export const PostgresqlConfig13 = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -4050,7 +4295,21 @@ export const PostgresqlConfig13 = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts index 57598ef0..8f7f4371 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts @@ -190,8 +190,24 @@ export interface Postgresqlconfig131c { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum Postgresqlconfig131c_BackslashQuote { @@ -694,6 +710,8 @@ export enum Postgresqlconfig131c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -722,6 +740,12 @@ export function postgresqlconfig131c_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -747,6 +771,10 @@ export function postgresqlconfig131c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -2100,12 +2128,99 @@ export const Postgresqlconfig131c = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2892,9 +3007,69 @@ export const Postgresqlconfig131c = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3579,10 +3754,58 @@ export const Postgresqlconfig131c = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3905,7 +4128,29 @@ export const Postgresqlconfig131c = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -4087,7 +4332,21 @@ export const Postgresqlconfig131c = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts index 90d6e092..05822cbd 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts @@ -199,8 +199,24 @@ export interface PostgresqlConfig14 { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum PostgresqlConfig14_BackslashQuote { @@ -703,6 +719,8 @@ export enum PostgresqlConfig14_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -731,6 +749,12 @@ export function postgresqlConfig14_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -756,6 +780,10 @@ export function postgresqlConfig14_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -2139,12 +2167,99 @@ export const PostgresqlConfig14 = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2952,9 +3067,69 @@ export const PostgresqlConfig14 = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3651,10 +3826,58 @@ export const PostgresqlConfig14 = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -3984,7 +4207,29 @@ export const PostgresqlConfig14 = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -4171,7 +4416,21 @@ export const PostgresqlConfig14 = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts index 92d5bb8d..6dab914a 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts @@ -199,8 +199,24 @@ export interface Postgresqlconfig141c { geqoThreshold?: number; /** tradeoff between planning time and query plan quality, default is 5 */ geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; /** initial value of the random number generator used by GEQO */ geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. 
To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). */ + logAutovacuumMinDuration?: number; } export enum Postgresqlconfig141c_BackslashQuote { @@ -703,6 +719,8 @@ export enum Postgresqlconfig141c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, UNRECOGNIZED = -1, } @@ -731,6 +749,12 @@ export function postgresqlconfig141c_SharedPreloadLibrariesFromJSON( case 6: case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; case -1: case "UNRECOGNIZED": default: @@ -756,6 +780,10 @@ export function postgresqlconfig141c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; default: return "UNKNOWN"; } @@ -2151,12 +2179,99 @@ export const Postgresqlconfig141c = { writer.uint32(1234).fork() ).ldelim(); } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } if (message.geqoSeed !== undefined) { DoubleValue.encode( { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, writer.uint32(1266).fork() ).ldelim(); } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } return writer; }, @@ -2973,9 +3088,69 @@ export const Postgresqlconfig141c = { case 154: message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; case 158: message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3684,10 +3859,58 @@ export const Postgresqlconfig141c = { object.geqoEffort !== undefined && object.geqoEffort !== null ? Number(object.geqoEffort) : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? 
Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; message.geqoSeed = object.geqoSeed !== undefined && object.geqoSeed !== null ? Number(object.geqoSeed) : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; return message; }, @@ -4020,7 +4243,29 @@ export const Postgresqlconfig141c = { message.geqoThreshold !== undefined && (obj.geqoThreshold = message.geqoThreshold); message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); return obj; }, @@ -4208,7 +4453,21 @@ export const Postgresqlconfig141c = { message.geqo = object.geqo ?? undefined; message.geqoThreshold = object.geqoThreshold ?? undefined; message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; message.geqoSeed = object.geqoSeed ?? 
undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15.ts new file mode 100644 index 00000000..8216bd3a --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15.ts @@ -0,0 +1,4619 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlConfig15 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. 
*/ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. */ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: PostgresqlConfig15_WalLevel; + synchronousCommit: PostgresqlConfig15_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: PostgresqlConfig15_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlConfig15_ForceParallelMode; + clientMinMessages: PostgresqlConfig15_LogLevel; + logMinMessages: PostgresqlConfig15_LogLevel; + logMinErrorStatement: PostgresqlConfig15_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlConfig15_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlConfig15_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlConfig15_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlConfig15_ByteaOutput; + xmlbinary: PostgresqlConfig15_XmlBinary; + xmloption: PostgresqlConfig15_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlConfig15_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: PostgresqlConfig15_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: PostgresqlConfig15_SharedPreloadLibraries[]; + /** in milliseconds. 
*/ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: PostgresqlConfig15_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: PostgresqlConfig15_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + /** in bytes. 
*/ + maxStackDepth?: number; + enableGroupByReordering?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; +} + +export enum PostgresqlConfig15_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_BackslashQuoteFromJSON( + object: any +): PostgresqlConfig15_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_BackslashQuoteToJSON( + object: PostgresqlConfig15_BackslashQuote +): string { + switch (object) { + case PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig15_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_ByteaOutputFromJSON( + object: any +): PostgresqlConfig15_ByteaOutput { + 
switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_ByteaOutputToJSON( + object: PostgresqlConfig15_ByteaOutput +): string { + switch (object) { + case PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig15_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_ConstraintExclusionFromJSON( + object: any +): PostgresqlConfig15_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_ConstraintExclusionToJSON( + object: PostgresqlConfig15_ConstraintExclusion +): string { + switch 
(object) { + case PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlConfig15_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_ForceParallelModeFromJSON( + object: any +): PostgresqlConfig15_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_ForceParallelModeToJSON( + object: PostgresqlConfig15_ForceParallelMode +): string { + switch (object) { + case PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case 
PostgresqlConfig15_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig15_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_LogErrorVerbosityToJSON( + object: PostgresqlConfig15_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig15_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + 
LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_LogLevelFromJSON( + object: any +): PostgresqlConfig15_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlConfig15_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_LogLevelToJSON( + object: PostgresqlConfig15_LogLevel +): string { + switch (object) { + case PostgresqlConfig15_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + 
case PostgresqlConfig15_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlConfig15_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_LogStatementFromJSON( + object: any +): PostgresqlConfig15_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlConfig15_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlConfig15_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlConfig15_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlConfig15_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlConfig15_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_LogStatementToJSON( + object: PostgresqlConfig15_LogStatement +): string { + switch (object) { + case PostgresqlConfig15_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlConfig15_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlConfig15_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case 
PostgresqlConfig15_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlConfig15_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_PgHintPlanDebugPrintFromJSON( + object: any +): PostgresqlConfig15_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig15_PgHintPlanDebugPrint +): string { + switch (object) { + case PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case 
PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig15_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_PlanCacheModeFromJSON( + object: any +): PostgresqlConfig15_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_PlanCacheModeToJSON( + object: PostgresqlConfig15_PlanCacheMode +): string { + switch (object) { + case PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig15_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN 
= 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_SharedPreloadLibrariesFromJSON( + object: any +): PostgresqlConfig15_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_SharedPreloadLibrariesToJSON( + object: 
PostgresqlConfig15_SharedPreloadLibraries +): string { + switch (object) { + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig15_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_SynchronousCommitFromJSON( + object: any +): PostgresqlConfig15_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + 
case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_SynchronousCommitToJSON( + object: PostgresqlConfig15_SynchronousCommit +): string { + switch (object) { + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig15_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_TransactionIsolationFromJSON( + object: any +): PostgresqlConfig15_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return 
PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_TransactionIsolationToJSON( + object: PostgresqlConfig15_TransactionIsolation +): string { + switch (object) { + case PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig15_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_WalLevelFromJSON( + object: any +): PostgresqlConfig15_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig15_WalLevel.WAL_LEVEL_UNSPECIFIED; + 
case 1: + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig15_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig15_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_WalLevelToJSON( + object: PostgresqlConfig15_WalLevel +): string { + switch (object) { + case PostgresqlConfig15_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig15_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig15_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig15_XmlBinaryFromJSON( + object: any +): PostgresqlConfig15_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig15_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlConfig15_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlConfig15_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_XmlBinaryToJSON( + object: PostgresqlConfig15_XmlBinary +): string { + switch (object) { + case PostgresqlConfig15_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig15_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig15_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig15_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function 
postgresqlConfig15_XmlOptionFromJSON( + object: any +): PostgresqlConfig15_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig15_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig15_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlConfig15_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig15_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlConfig15_XmlOptionToJSON( + object: PostgresqlConfig15_XmlOption +): string { + switch (object) { + case PostgresqlConfig15_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig15_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig15_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface PostgresqlConfigSet15 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15"; + /** + * Effective settings for a PostgreSQL 15 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: PostgresqlConfig15; + /** User-defined settings for a PostgreSQL 15 cluster. */ + userConfig?: PostgresqlConfig15; + /** Default configuration for a PostgreSQL 15 cluster. 
*/ + defaultConfig?: PostgresqlConfig15; +} + +const basePostgresqlConfig15: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const PostgresqlConfig15 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15" as const, + + encode( + message: PostgresqlConfig15, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: 
message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! }, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); 
+ } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! 
}, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! 
}, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + 
writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! }, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + 
}, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! 
}, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + 
).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.enableGroupByReordering !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGroupByReordering!, + }, + writer.uint32(1210).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== 
undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PostgresqlConfig15 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfig15 } as PostgresqlConfig15; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + 
message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.forceParallelMode = reader.int32() as any; + 
break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + 
message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + 
break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + 
).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 151: + message.enableGroupByReordering = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, 
reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfig15 { + const message = { ...basePostgresqlConfig15 } as PostgresqlConfig15; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? 
Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? 
Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlConfig15_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlConfig15_SynchronousCommitFromJSON(object.synchronousCommit) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? 
Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlConfig15_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlConfig15_ForceParallelModeFromJSON(object.forceParallelMode) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlConfig15_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlConfig15_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlConfig15_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? 
Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlConfig15_LogErrorVerbosityFromJSON(object.logErrorVerbosity) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlConfig15_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlConfig15_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? 
Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlConfig15_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlConfig15_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlConfig15_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlConfig15_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? 
Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? 
Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? 
Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? 
Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlConfig15_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? 
Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlConfig15_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? 
Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlConfig15_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlConfig15_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? 
Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? 
Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.enableGroupByReordering = + object.enableGroupByReordering !== undefined && + object.enableGroupByReordering !== null + ? Boolean(object.enableGroupByReordering) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? 
Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfig15): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = 
message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlConfig15_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlConfig15_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlConfig15_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlConfig15_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlConfig15_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlConfig15_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = 
postgresqlConfig15_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlConfig15_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlConfig15_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlConfig15_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlConfig15_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlConfig15_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = 
postgresqlConfig15_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlConfig15_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && 
+ (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + 
message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlConfig15_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlConfig15_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = 
message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = postgresqlConfig15_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlConfig15_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + 
(obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = 
message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.enableGroupByReordering !== undefined && + (obj.enableGroupByReordering = message.enableGroupByReordering); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfig15 { + const message = { ...basePostgresqlConfig15 } as PostgresqlConfig15; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? 
undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? 
undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? 
undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? 
undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? 
undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? 
undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.enableGroupByReordering = + object.enableGroupByReordering ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfig15.$type, PostgresqlConfig15); + +const basePostgresqlConfigSet15: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15", +}; + +export const PostgresqlConfigSet15 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15" as const, + + encode( + message: PostgresqlConfigSet15, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + PostgresqlConfig15.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + PostgresqlConfig15.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + PostgresqlConfig15.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlConfigSet15 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfigSet15 } as PostgresqlConfigSet15; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = PostgresqlConfig15.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = PostgresqlConfig15.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = PostgresqlConfig15.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfigSet15 { + const message = { ...basePostgresqlConfigSet15 } as PostgresqlConfigSet15; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
PostgresqlConfig15.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig15.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig15.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfigSet15): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? PostgresqlConfig15.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? PostgresqlConfig15.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? PostgresqlConfig15.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfigSet15 { + const message = { ...basePostgresqlConfigSet15 } as PostgresqlConfigSet15; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig15.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig15.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig15.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfigSet15.$type, PostgresqlConfigSet15); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c.ts new file mode 100644 index 00000000..df809f9c --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql15_1c.ts @@ -0,0 +1,4684 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlconfig151c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15_1C"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. 
*/ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: Postgresqlconfig151c_WalLevel; + synchronousCommit: Postgresqlconfig151c_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: Postgresqlconfig151c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlconfig151c_ForceParallelMode; + clientMinMessages: Postgresqlconfig151c_LogLevel; + logMinMessages: Postgresqlconfig151c_LogLevel; + logMinErrorStatement: Postgresqlconfig151c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlconfig151c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlconfig151c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlconfig151c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlconfig151c_ByteaOutput; + xmlbinary: Postgresqlconfig151c_XmlBinary; + xmloption: Postgresqlconfig151c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlconfig151c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: Postgresqlconfig151c_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: Postgresqlconfig151c_SharedPreloadLibraries[]; + /** in milliseconds. 
*/ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: Postgresqlconfig151c_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: Postgresqlconfig151c_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** in bytes. 
*/ + maxStackDepth?: number; + enableGroupByReordering?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; +} + +export enum Postgresqlconfig151c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_BackslashQuoteFromJSON( + object: any +): Postgresqlconfig151c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_BackslashQuoteToJSON( + object: Postgresqlconfig151c_BackslashQuote +): string { + switch (object) { + case Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig151c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_ByteaOutputFromJSON( + object: any +): 
Postgresqlconfig151c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_ByteaOutputToJSON( + object: Postgresqlconfig151c_ByteaOutput +): string { + switch (object) { + case Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig151c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_ConstraintExclusionFromJSON( + object: any +): Postgresqlconfig151c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function 
postgresqlconfig151c_ConstraintExclusionToJSON( + object: Postgresqlconfig151c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlconfig151c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_ForceParallelModeFromJSON( + object: any +): Postgresqlconfig151c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_ForceParallelModeToJSON( + object: Postgresqlconfig151c_ForceParallelMode +): string { + switch (object) { + case Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case 
Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlconfig151c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig151c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_LogErrorVerbosityToJSON( + object: Postgresqlconfig151c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig151c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + 
LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_LogLevelFromJSON( + object: any +): Postgresqlconfig151c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlconfig151c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_LogLevelToJSON( + object: Postgresqlconfig151c_LogLevel +): string { + switch (object) { + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG4: + return 
"LOG_LEVEL_DEBUG4"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlconfig151c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_LogStatementFromJSON( + object: any +): Postgresqlconfig151c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlconfig151c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlconfig151c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlconfig151c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlconfig151c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlconfig151c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_LogStatementToJSON( + object: Postgresqlconfig151c_LogStatement +): string { + switch (object) { + case Postgresqlconfig151c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return 
"LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlconfig151c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlconfig151c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlconfig151c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlconfig151c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_PgHintPlanDebugPrintFromJSON( + object: any +): Postgresqlconfig151c_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_PgHintPlanDebugPrintToJSON( + object: Postgresqlconfig151c_PgHintPlanDebugPrint +): string { + switch (object) { + case Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case 
Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig151c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_PlanCacheModeFromJSON( + object: any +): Postgresqlconfig151c_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_PlanCacheModeToJSON( + object: Postgresqlconfig151c_PlanCacheMode +): string { + switch (object) { + case Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case 
Postgresqlconfig151c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_SharedPreloadLibrariesFromJSON( + object: any +): Postgresqlconfig151c_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case 
"SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig151c_SharedPreloadLibraries +): string { + switch (object) { + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig151c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function 
postgresqlconfig151c_SynchronousCommitFromJSON( + object: any +): Postgresqlconfig151c_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_SynchronousCommitToJSON( + object: Postgresqlconfig151c_SynchronousCommit +): string { + switch (object) { + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case Postgresqlconfig151c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, 
+ TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_TransactionIsolationFromJSON( + object: any +): Postgresqlconfig151c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_TransactionIsolationToJSON( + object: Postgresqlconfig151c_TransactionIsolation +): string { + switch (object) { + case Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlconfig151c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return 
"UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_WalLevelFromJSON( + object: any +): Postgresqlconfig151c_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig151c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig151c_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig151c_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_WalLevelToJSON( + object: Postgresqlconfig151c_WalLevel +): string { + switch (object) { + case Postgresqlconfig151c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig151c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig151c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_XmlBinaryFromJSON( + object: any +): Postgresqlconfig151c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlconfig151c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlconfig151c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlconfig151c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_XmlBinaryToJSON( + object: Postgresqlconfig151c_XmlBinary +): string { + switch (object) { + case 
Postgresqlconfig151c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlconfig151c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlconfig151c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig151c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig151c_XmlOptionFromJSON( + object: any +): Postgresqlconfig151c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlconfig151c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlconfig151c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlconfig151c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig151c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlconfig151c_XmlOptionToJSON( + object: Postgresqlconfig151c_XmlOption +): string { + switch (object) { + case Postgresqlconfig151c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlconfig151c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlconfig151c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface Postgresqlconfigset151c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15_1C"; + /** + * Effective settings for a PostgreSQL 15 1C cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Postgresqlconfig151c; + /** User-defined settings for a PostgreSQL 15 1C cluster. */ + userConfig?: Postgresqlconfig151c; + /** Default configuration for a PostgreSQL 15 1C cluster. 
*/ + defaultConfig?: Postgresqlconfig151c; +} + +const basePostgresqlconfig151c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15_1C", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const Postgresqlconfig151c = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig15_1C" as const, + + encode( + message: Postgresqlconfig151c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: 
message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! }, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); 
+ } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! 
}, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! 
}, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if 
(message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! 
}, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if 
(message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: 
"google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! }, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + 
writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! 
}, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.enableGroupByReordering !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGroupByReordering!, + }, + writer.uint32(1210).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== 
undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfig151c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlconfig151c } as Postgresqlconfig151c; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + 
case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.forceParallelMode = reader.int32() 
as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + 
message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + 
break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( 
+ reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + 
).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + 
).value; + break; + case 151: + message.enableGroupByReordering = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfig151c { + const message = { ...basePostgresqlconfig151c } as Postgresqlconfig151c; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? 
Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? 
Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlconfig151c_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlconfig151c_SynchronousCommitFromJSON( + object.synchronousCommit + ) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? 
Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlconfig151c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlconfig151c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlconfig151c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? 
postgresqlconfig151c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlconfig151c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlconfig151c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlconfig151c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? 
Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlconfig151c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlconfig151c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlconfig151c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlconfig151c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? 
Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlconfig151c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? 
Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? 
Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? 
Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? 
Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlconfig151c_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlconfig151c_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? 
Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlconfig151c_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlconfig151c_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? 
Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? 
Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.enableGroupByReordering = + object.enableGroupByReordering !== undefined && + object.enableGroupByReordering !== null + ? 
Boolean(object.enableGroupByReordering) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? 
Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? Number(object.logAutovacuumMinDuration) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfig151c): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined 
&& + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlconfig151c_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlconfig151c_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlconfig151c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlconfig151c_ForceParallelModeToJSON( + message.forceParallelMode + )); + 
message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlconfig151c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlconfig151c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlconfig151c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlconfig151c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlconfig151c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlconfig151c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + 
message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlconfig151c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlconfig151c_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = postgresqlconfig151c_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlconfig151c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== 
undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && + (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + 
(obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlconfig151c_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = 
message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlconfig151c_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = + postgresqlconfig151c_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlconfig151c_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== 
undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + (obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = 
message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.enableGroupByReordering !== undefined && + (obj.enableGroupByReordering = message.enableGroupByReordering); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = 
message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfig151c { + const message = { ...basePostgresqlconfig151c } as Postgresqlconfig151c; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? 
undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 
0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? 
undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? 
undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? 
undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.enableGroupByReordering = + object.enableGroupByReordering ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? 
undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfig151c.$type, Postgresqlconfig151c); + +const basePostgresqlconfigset151c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15_1C", +}; + +export const Postgresqlconfigset151c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet15_1C" as const, + + encode( + message: Postgresqlconfigset151c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Postgresqlconfig151c.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Postgresqlconfig151c.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Postgresqlconfig151c.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfigset151c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePostgresqlconfigset151c, + } as Postgresqlconfigset151c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Postgresqlconfig151c.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Postgresqlconfig151c.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Postgresqlconfig151c.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfigset151c { + const message = { + ...basePostgresqlconfigset151c, + } as Postgresqlconfigset151c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig151c.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig151c.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig151c.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfigset151c): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Postgresqlconfig151c.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Postgresqlconfig151c.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Postgresqlconfig151c.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfigset151c { + const message = { + ...basePostgresqlconfigset151c, + } as Postgresqlconfigset151c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig151c.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig151c.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig151c.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfigset151c.$type, Postgresqlconfigset151c); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16.ts new file mode 100644 index 00000000..7da64e34 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16.ts @@ -0,0 +1,4627 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlConfig16 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. 
*/ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: PostgresqlConfig16_WalLevel; + synchronousCommit: PostgresqlConfig16_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: PostgresqlConfig16_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + debugParallelQuery: PostgresqlConfig16_DebugParallelQuery; + clientMinMessages: PostgresqlConfig16_LogLevel; + logMinMessages: PostgresqlConfig16_LogLevel; + logMinErrorStatement: PostgresqlConfig16_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlConfig16_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlConfig16_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlConfig16_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlConfig16_ByteaOutput; + xmlbinary: PostgresqlConfig16_XmlBinary; + xmloption: PostgresqlConfig16_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlConfig16_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: PostgresqlConfig16_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: PostgresqlConfig16_SharedPreloadLibraries[]; + /** in milliseconds. 
*/ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: PostgresqlConfig16_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: PostgresqlConfig16_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + /** in bytes. 
*/ + maxStackDepth?: number; + enableGroupByReordering?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; +} + +export enum PostgresqlConfig16_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_BackslashQuoteFromJSON( + object: any +): PostgresqlConfig16_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_BackslashQuoteToJSON( + object: PostgresqlConfig16_BackslashQuote +): string { + switch (object) { + case PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig16_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_ByteaOutputFromJSON( + object: any +): PostgresqlConfig16_ByteaOutput { + 
switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_ByteaOutputToJSON( + object: PostgresqlConfig16_ByteaOutput +): string { + switch (object) { + case PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig16_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_ConstraintExclusionFromJSON( + object: any +): PostgresqlConfig16_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_ConstraintExclusionToJSON( + object: PostgresqlConfig16_ConstraintExclusion +): string { + switch 
(object) { + case PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlConfig16_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_DebugParallelQuery { + DEBUG_PARALLEL_QUERY_UNSPECIFIED = 0, + DEBUG_PARALLEL_QUERY_ON = 1, + DEBUG_PARALLEL_QUERY_OFF = 2, + DEBUG_PARALLEL_QUERY_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_DebugParallelQueryFromJSON( + object: any +): PostgresqlConfig16_DebugParallelQuery { + switch (object) { + case 0: + case "DEBUG_PARALLEL_QUERY_UNSPECIFIED": + return PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED; + case 1: + case "DEBUG_PARALLEL_QUERY_ON": + return PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON; + case 2: + case "DEBUG_PARALLEL_QUERY_OFF": + return PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF; + case 3: + case "DEBUG_PARALLEL_QUERY_REGRESS": + return PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_DebugParallelQuery.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_DebugParallelQueryToJSON( + object: PostgresqlConfig16_DebugParallelQuery +): string { + switch (object) { + case PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED: + return "DEBUG_PARALLEL_QUERY_UNSPECIFIED"; + case PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON: + return "DEBUG_PARALLEL_QUERY_ON"; + case PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF: + return "DEBUG_PARALLEL_QUERY_OFF"; + case 
PostgresqlConfig16_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS: + return "DEBUG_PARALLEL_QUERY_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig16_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_LogErrorVerbosityToJSON( + object: PostgresqlConfig16_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig16_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_INFO = 12, + LOG_LEVEL_LOG = 6, + 
LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_LogLevelFromJSON( + object: any +): PostgresqlConfig16_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG1; + case 12: + case "LOG_LEVEL_INFO": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_INFO; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlConfig16_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_LogLevelToJSON( + object: PostgresqlConfig16_LogLevel +): string { + switch (object) { + case PostgresqlConfig16_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG3: + 
return "LOG_LEVEL_DEBUG3"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_INFO: + return "LOG_LEVEL_INFO"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlConfig16_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_LogStatementFromJSON( + object: any +): PostgresqlConfig16_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlConfig16_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlConfig16_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlConfig16_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlConfig16_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlConfig16_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_LogStatementToJSON( + object: PostgresqlConfig16_LogStatement +): string { + switch (object) { + case PostgresqlConfig16_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case 
PostgresqlConfig16_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlConfig16_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlConfig16_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlConfig16_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_PgHintPlanDebugPrintFromJSON( + object: any +): PostgresqlConfig16_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig16_PgHintPlanDebugPrint +): string { + switch (object) { + case PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + 
case PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig16_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_PlanCacheModeFromJSON( + object: any +): PostgresqlConfig16_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_PlanCacheModeToJSON( + object: PostgresqlConfig16_PlanCacheMode +): string { + switch (object) { + case PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig16_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlConfig16_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_SharedPreloadLibrariesFromJSON( + object: any +): PostgresqlConfig16_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case "SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; + case -1: + case "UNRECOGNIZED": + default: + return 
PostgresqlConfig16_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig16_SharedPreloadLibraries +): string { + switch (object) { + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case PostgresqlConfig16_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_SynchronousCommitFromJSON( + object: any +): PostgresqlConfig16_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return 
PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_SynchronousCommitToJSON( + object: PostgresqlConfig16_SynchronousCommit +): string { + switch (object) { + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig16_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function 
postgresqlConfig16_TransactionIsolationFromJSON( + object: any +): PostgresqlConfig16_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_TransactionIsolationToJSON( + object: PostgresqlConfig16_TransactionIsolation +): string { + switch (object) { + case PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig16_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function 
postgresqlConfig16_WalLevelFromJSON( + object: any +): PostgresqlConfig16_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig16_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig16_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig16_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_WalLevelToJSON( + object: PostgresqlConfig16_WalLevel +): string { + switch (object) { + case PostgresqlConfig16_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig16_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig16_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig16_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_XmlBinaryFromJSON( + object: any +): PostgresqlConfig16_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig16_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlConfig16_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlConfig16_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_XmlBinaryToJSON( + object: PostgresqlConfig16_XmlBinary +): string { + switch (object) { + case PostgresqlConfig16_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig16_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig16_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return 
"UNKNOWN"; + } +} + +export enum PostgresqlConfig16_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig16_XmlOptionFromJSON( + object: any +): PostgresqlConfig16_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig16_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig16_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlConfig16_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig16_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlConfig16_XmlOptionToJSON( + object: PostgresqlConfig16_XmlOption +): string { + switch (object) { + case PostgresqlConfig16_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig16_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig16_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface PostgresqlConfigSet16 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16"; + /** + * Effective settings for a PostgreSQL 16 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: PostgresqlConfig16; + /** User-defined settings for a PostgreSQL 16 cluster. */ + userConfig?: PostgresqlConfig16; + /** Default configuration for a PostgreSQL 16 cluster. 
*/ + defaultConfig?: PostgresqlConfig16; +} + +const basePostgresqlConfig16: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + debugParallelQuery: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const PostgresqlConfig16 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16" as const, + + encode( + message: PostgresqlConfig16, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: 
message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! }, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.debugParallelQuery !== 0) { + writer.uint32(264).int32(message.debugParallelQuery); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + 
).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! 
}, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! 
}, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + 
writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! }, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + 
}, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! 
}, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + 
).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.enableGroupByReordering !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGroupByReordering!, + }, + writer.uint32(1210).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== 
undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PostgresqlConfig16 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfig16 } as PostgresqlConfig16; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + 
message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.debugParallelQuery = reader.int32() as any; + 
break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + 
message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + 
break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + 
).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 151: + message.enableGroupByReordering = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, 
reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfig16 { + const message = { ...basePostgresqlConfig16 } as PostgresqlConfig16; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? 
Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? 
Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlConfig16_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlConfig16_SynchronousCommitFromJSON(object.synchronousCommit) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? 
Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlConfig16_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.debugParallelQuery = + object.debugParallelQuery !== undefined && + object.debugParallelQuery !== null + ? postgresqlConfig16_DebugParallelQueryFromJSON( + object.debugParallelQuery + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlConfig16_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlConfig16_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? 
postgresqlConfig16_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlConfig16_LogErrorVerbosityFromJSON(object.logErrorVerbosity) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlConfig16_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? 
postgresqlConfig16_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlConfig16_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlConfig16_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlConfig16_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? 
Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlConfig16_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? 
Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? 
Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? 
Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlConfig16_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? 
Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlConfig16_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? 
Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlConfig16_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlConfig16_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? 
Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? 
Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.enableGroupByReordering = + object.enableGroupByReordering !== undefined && + object.enableGroupByReordering !== null + ? Boolean(object.enableGroupByReordering) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? 
Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? 
Number(object.logAutovacuumMinDuration) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfig16): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = 
message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlConfig16_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlConfig16_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlConfig16_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.debugParallelQuery !== undefined && + (obj.debugParallelQuery = postgresqlConfig16_DebugParallelQueryToJSON( + message.debugParallelQuery + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlConfig16_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlConfig16_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = 
postgresqlConfig16_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlConfig16_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlConfig16_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlConfig16_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlConfig16_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlConfig16_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = 
postgresqlConfig16_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlConfig16_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && 
+ (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + 
message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlConfig16_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlConfig16_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = 
message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = postgresqlConfig16_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlConfig16_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + 
(obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = 
message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.enableGroupByReordering !== undefined && + (obj.enableGroupByReordering = message.enableGroupByReordering); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfig16 { + const message = { ...basePostgresqlConfig16 } as PostgresqlConfig16; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? 
undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? 
undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.debugParallelQuery = object.debugParallelQuery ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? 
undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? 
undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? 
undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? 
undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.enableGroupByReordering = + object.enableGroupByReordering ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfig16.$type, PostgresqlConfig16); + +const basePostgresqlConfigSet16: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16", +}; + +export const PostgresqlConfigSet16 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16" as const, + + encode( + message: PostgresqlConfigSet16, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + PostgresqlConfig16.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + PostgresqlConfig16.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + PostgresqlConfig16.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlConfigSet16 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfigSet16 } as PostgresqlConfigSet16; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = PostgresqlConfig16.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = PostgresqlConfig16.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = PostgresqlConfig16.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfigSet16 { + const message = { ...basePostgresqlConfigSet16 } as PostgresqlConfigSet16; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
PostgresqlConfig16.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig16.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig16.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfigSet16): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? PostgresqlConfig16.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? PostgresqlConfig16.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? PostgresqlConfig16.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfigSet16 { + const message = { ...basePostgresqlConfigSet16 } as PostgresqlConfigSet16; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig16.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig16.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig16.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfigSet16.$type, PostgresqlConfigSet16); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c.ts new file mode 100644 index 00000000..276f47c8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql16_1c.ts @@ -0,0 +1,4690 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlconfig161c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16_1C"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. 
*/ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: Postgresqlconfig161c_WalLevel; + synchronousCommit: Postgresqlconfig161c_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: Postgresqlconfig161c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + debugParallelQuery: Postgresqlconfig161c_DebugParallelQuery; + clientMinMessages: Postgresqlconfig161c_LogLevel; + logMinMessages: Postgresqlconfig161c_LogLevel; + logMinErrorStatement: Postgresqlconfig161c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlconfig161c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlconfig161c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlconfig161c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlconfig161c_ByteaOutput; + xmlbinary: Postgresqlconfig161c_XmlBinary; + xmloption: Postgresqlconfig161c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlconfig161c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: Postgresqlconfig161c_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: Postgresqlconfig161c_SharedPreloadLibraries[]; + /** in milliseconds. 
*/ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: Postgresqlconfig161c_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: Postgresqlconfig161c_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** in bytes. 
*/ + maxStackDepth?: number; + enableGroupByReordering?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** number of individuals in the genetic population, useful values are typically 100 to 1000; default - 0 - choose based on based on geqo_effort */ + geqoPoolSize?: number; + /** the number of generations used by GEQO, useful values are in the same range as the pool size */ + geqoGenerations?: number; + /** selective pressure within the population */ + geqoSelectionBias?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; + pgTrgmSimilarityThreshold?: number; + pgTrgmWordSimilarityThreshold?: number; + pgTrgmStrictWordSimilarityThreshold?: number; + /** in milliseconds. */ + maxStandbyArchiveDelay?: number; + /** Terminate any session that exceeds the designated timeout, specified in milliseconds. If a timeout is not specified, the default session timeout is set to 12 hours. To disable it, specify a value of 0. */ + sessionDurationTimeout?: number; + logReplicationCommands?: boolean; + /** in milliseconds. The default is 1000 (1 sec). 
*/ + logAutovacuumMinDuration?: number; +} + +export enum Postgresqlconfig161c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_BackslashQuoteFromJSON( + object: any +): Postgresqlconfig161c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_BackslashQuoteToJSON( + object: Postgresqlconfig161c_BackslashQuote +): string { + switch (object) { + case Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig161c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_ByteaOutputFromJSON( + object: any +): 
Postgresqlconfig161c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_ByteaOutputToJSON( + object: Postgresqlconfig161c_ByteaOutput +): string { + switch (object) { + case Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig161c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_ConstraintExclusionFromJSON( + object: any +): Postgresqlconfig161c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function 
postgresqlconfig161c_ConstraintExclusionToJSON( + object: Postgresqlconfig161c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlconfig161c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_DebugParallelQuery { + DEBUG_PARALLEL_QUERY_UNSPECIFIED = 0, + DEBUG_PARALLEL_QUERY_ON = 1, + DEBUG_PARALLEL_QUERY_OFF = 2, + DEBUG_PARALLEL_QUERY_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_DebugParallelQueryFromJSON( + object: any +): Postgresqlconfig161c_DebugParallelQuery { + switch (object) { + case 0: + case "DEBUG_PARALLEL_QUERY_UNSPECIFIED": + return Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED; + case 1: + case "DEBUG_PARALLEL_QUERY_ON": + return Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON; + case 2: + case "DEBUG_PARALLEL_QUERY_OFF": + return Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF; + case 3: + case "DEBUG_PARALLEL_QUERY_REGRESS": + return Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_DebugParallelQuery.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_DebugParallelQueryToJSON( + object: Postgresqlconfig161c_DebugParallelQuery +): string { + switch (object) { + case Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_UNSPECIFIED: + return "DEBUG_PARALLEL_QUERY_UNSPECIFIED"; + case Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_ON: + return 
"DEBUG_PARALLEL_QUERY_ON"; + case Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_OFF: + return "DEBUG_PARALLEL_QUERY_OFF"; + case Postgresqlconfig161c_DebugParallelQuery.DEBUG_PARALLEL_QUERY_REGRESS: + return "DEBUG_PARALLEL_QUERY_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig161c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_LogErrorVerbosityToJSON( + object: Postgresqlconfig161c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig161c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_LogLevel { + LOG_LEVEL_UNSPECIFIED 
= 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_INFO = 12, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_LogLevelFromJSON( + object: any +): Postgresqlconfig161c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG1; + case 12: + case "LOG_LEVEL_INFO": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_INFO; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlconfig161c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_LogLevelToJSON( + object: Postgresqlconfig161c_LogLevel +): string { + switch (object) { + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case 
Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_INFO: + return "LOG_LEVEL_INFO"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlconfig161c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_LogStatementFromJSON( + object: any +): Postgresqlconfig161c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlconfig161c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlconfig161c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlconfig161c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlconfig161c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlconfig161c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_LogStatement.UNRECOGNIZED; + } +} + 
+export function postgresqlconfig161c_LogStatementToJSON( + object: Postgresqlconfig161c_LogStatement +): string { + switch (object) { + case Postgresqlconfig161c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlconfig161c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlconfig161c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlconfig161c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlconfig161c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_PgHintPlanDebugPrintFromJSON( + object: any +): Postgresqlconfig161c_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_PgHintPlanDebugPrintToJSON( + object: Postgresqlconfig161c_PgHintPlanDebugPrint +): 
string { + switch (object) { + case Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig161c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_PlanCacheModeFromJSON( + object: any +): Postgresqlconfig161c_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_PlanCacheModeToJSON( + object: Postgresqlconfig161c_PlanCacheMode +): string { + switch (object) { + case Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return 
"PLAN_CACHE_MODE_AUTO"; + case Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case Postgresqlconfig161c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + SHARED_PRELOAD_LIBRARIES_PG_PREWARM = 7, + SHARED_PRELOAD_LIBRARIES_PGAUDIT = 8, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_SharedPreloadLibrariesFromJSON( + object: any +): Postgresqlconfig161c_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case 7: + case 
"SHARED_PRELOAD_LIBRARIES_PG_PREWARM": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM; + case 8: + case "SHARED_PRELOAD_LIBRARIES_PGAUDIT": + return Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig161c_SharedPreloadLibraries +): string { + switch (object) { + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_PREWARM: + return "SHARED_PRELOAD_LIBRARIES_PG_PREWARM"; + case Postgresqlconfig161c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGAUDIT: + return "SHARED_PRELOAD_LIBRARIES_PGAUDIT"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + 
SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_SynchronousCommitFromJSON( + object: any +): Postgresqlconfig161c_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_SynchronousCommitToJSON( + object: Postgresqlconfig161c_SynchronousCommit +): string { + switch (object) { + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case Postgresqlconfig161c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum 
Postgresqlconfig161c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_TransactionIsolationFromJSON( + object: any +): Postgresqlconfig161c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_TransactionIsolationToJSON( + object: Postgresqlconfig161c_TransactionIsolation +): string { + switch (object) { + case Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case 
Postgresqlconfig161c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_WalLevelFromJSON( + object: any +): Postgresqlconfig161c_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig161c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig161c_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig161c_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_WalLevelToJSON( + object: Postgresqlconfig161c_WalLevel +): string { + switch (object) { + case Postgresqlconfig161c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig161c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig161c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_XmlBinaryFromJSON( + object: any +): Postgresqlconfig161c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlconfig161c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlconfig161c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlconfig161c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_XmlBinary.UNRECOGNIZED; + } +} + +export function 
postgresqlconfig161c_XmlBinaryToJSON( + object: Postgresqlconfig161c_XmlBinary +): string { + switch (object) { + case Postgresqlconfig161c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlconfig161c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlconfig161c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig161c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig161c_XmlOptionFromJSON( + object: any +): Postgresqlconfig161c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlconfig161c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlconfig161c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlconfig161c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig161c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlconfig161c_XmlOptionToJSON( + object: Postgresqlconfig161c_XmlOption +): string { + switch (object) { + case Postgresqlconfig161c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlconfig161c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlconfig161c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface Postgresqlconfigset161c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16_1C"; + /** + * Effective settings for a PostgreSQL 16 1C cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Postgresqlconfig161c; + /** User-defined settings for a PostgreSQL 16 1C cluster. 
*/ + userConfig?: Postgresqlconfig161c; + /** Default configuration for a PostgreSQL 16 1C cluster. */ + defaultConfig?: Postgresqlconfig161c; +} + +const basePostgresqlconfig161c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16_1C", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + debugParallelQuery: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const Postgresqlconfig161c = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig16_1C" as const, + + encode( + message: Postgresqlconfig161c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: 
message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! }, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.debugParallelQuery !== 0) { + writer.uint32(264).int32(message.debugParallelQuery); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + 
).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! 
}, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! 
}, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if 
(message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! 
}, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if 
(message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: 
"google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! }, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + 
writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! 
}, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.enableGroupByReordering !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGroupByReordering!, + }, + writer.uint32(1210).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoPoolSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoPoolSize! 
}, + writer.uint32(1242).fork() + ).ldelim(); + } + if (message.geqoGenerations !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.geqoGenerations!, + }, + writer.uint32(1250).fork() + ).ldelim(); + } + if (message.geqoSelectionBias !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.geqoSelectionBias!, + }, + writer.uint32(1258).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + if (message.pgTrgmSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmSimilarityThreshold!, + }, + writer.uint32(1274).fork() + ).ldelim(); + } + if (message.pgTrgmWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmWordSimilarityThreshold!, + }, + writer.uint32(1282).fork() + ).ldelim(); + } + if (message.pgTrgmStrictWordSimilarityThreshold !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgTrgmStrictWordSimilarityThreshold!, + }, + writer.uint32(1290).fork() + ).ldelim(); + } + if (message.maxStandbyArchiveDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyArchiveDelay!, + }, + writer.uint32(1298).fork() + ).ldelim(); + } + if (message.sessionDurationTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.sessionDurationTimeout!, + }, + writer.uint32(1306).fork() + ).ldelim(); + } + if (message.logReplicationCommands !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logReplicationCommands!, + }, + writer.uint32(1314).fork() + ).ldelim(); + } + if (message.logAutovacuumMinDuration !== 
undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logAutovacuumMinDuration!, + }, + writer.uint32(1322).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfig161c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlconfig161c } as Postgresqlconfig161c; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + 
case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.debugParallelQuery = 
reader.int32() as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 
56: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; 
+ break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = 
DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + 
).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, 
+ reader.uint32() + ).value; + break; + case 151: + message.enableGroupByReordering = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 155: + message.geqoPoolSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 156: + message.geqoGenerations = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 157: + message.geqoSelectionBias = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + case 159: + message.pgTrgmSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 160: + message.pgTrgmWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 161: + message.pgTrgmStrictWordSimilarityThreshold = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 162: + message.maxStandbyArchiveDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 163: + message.sessionDurationTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 164: + message.logReplicationCommands = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 165: + message.logAutovacuumMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfig161c { + const message = { ...basePostgresqlconfig161c } as Postgresqlconfig161c; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? 
Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? 
Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlconfig161c_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlconfig161c_SynchronousCommitFromJSON( + object.synchronousCommit + ) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? 
Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlconfig161c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.debugParallelQuery = + object.debugParallelQuery !== undefined && + object.debugParallelQuery !== null + ? postgresqlconfig161c_DebugParallelQueryFromJSON( + object.debugParallelQuery + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlconfig161c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? 
postgresqlconfig161c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlconfig161c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlconfig161c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlconfig161c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? 
Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlconfig161c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlconfig161c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlconfig161c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlconfig161c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? 
Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlconfig161c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? 
Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? 
Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? 
Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? 
Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlconfig161c_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlconfig161c_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? 
Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlconfig161c_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlconfig161c_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? 
Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? 
Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.enableGroupByReordering = + object.enableGroupByReordering !== undefined && + object.enableGroupByReordering !== null + ? 
Boolean(object.enableGroupByReordering) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoPoolSize = + object.geqoPoolSize !== undefined && object.geqoPoolSize !== null + ? Number(object.geqoPoolSize) + : undefined; + message.geqoGenerations = + object.geqoGenerations !== undefined && object.geqoGenerations !== null + ? Number(object.geqoGenerations) + : undefined; + message.geqoSelectionBias = + object.geqoSelectionBias !== undefined && + object.geqoSelectionBias !== null + ? Number(object.geqoSelectionBias) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold !== undefined && + object.pgTrgmSimilarityThreshold !== null + ? Number(object.pgTrgmSimilarityThreshold) + : undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold !== undefined && + object.pgTrgmWordSimilarityThreshold !== null + ? Number(object.pgTrgmWordSimilarityThreshold) + : undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold !== undefined && + object.pgTrgmStrictWordSimilarityThreshold !== null + ? Number(object.pgTrgmStrictWordSimilarityThreshold) + : undefined; + message.maxStandbyArchiveDelay = + object.maxStandbyArchiveDelay !== undefined && + object.maxStandbyArchiveDelay !== null + ? Number(object.maxStandbyArchiveDelay) + : undefined; + message.sessionDurationTimeout = + object.sessionDurationTimeout !== undefined && + object.sessionDurationTimeout !== null + ? 
Number(object.sessionDurationTimeout) + : undefined; + message.logReplicationCommands = + object.logReplicationCommands !== undefined && + object.logReplicationCommands !== null + ? Boolean(object.logReplicationCommands) + : undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration !== undefined && + object.logAutovacuumMinDuration !== null + ? Number(object.logAutovacuumMinDuration) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfig161c): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined 
&& + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlconfig161c_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlconfig161c_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlconfig161c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.debugParallelQuery !== undefined && + (obj.debugParallelQuery = postgresqlconfig161c_DebugParallelQueryToJSON( + message.debugParallelQuery + )); + 
message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlconfig161c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlconfig161c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlconfig161c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlconfig161c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlconfig161c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlconfig161c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + 
message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlconfig161c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlconfig161c_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = postgresqlconfig161c_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlconfig161c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== 
undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && + (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + 
(obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlconfig161c_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = 
message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlconfig161c_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = + postgresqlconfig161c_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlconfig161c_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== 
undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + (obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = 
message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.enableGroupByReordering !== undefined && + (obj.enableGroupByReordering = message.enableGroupByReordering); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoPoolSize !== undefined && + (obj.geqoPoolSize = message.geqoPoolSize); + message.geqoGenerations !== undefined && + (obj.geqoGenerations = message.geqoGenerations); + message.geqoSelectionBias !== undefined && + (obj.geqoSelectionBias = message.geqoSelectionBias); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + message.pgTrgmSimilarityThreshold !== undefined && + (obj.pgTrgmSimilarityThreshold = message.pgTrgmSimilarityThreshold); + message.pgTrgmWordSimilarityThreshold !== undefined && + (obj.pgTrgmWordSimilarityThreshold = + message.pgTrgmWordSimilarityThreshold); + message.pgTrgmStrictWordSimilarityThreshold !== undefined && + (obj.pgTrgmStrictWordSimilarityThreshold = + message.pgTrgmStrictWordSimilarityThreshold); + message.maxStandbyArchiveDelay !== undefined && + (obj.maxStandbyArchiveDelay = message.maxStandbyArchiveDelay); + message.sessionDurationTimeout !== undefined && + (obj.sessionDurationTimeout = 
message.sessionDurationTimeout); + message.logReplicationCommands !== undefined && + (obj.logReplicationCommands = message.logReplicationCommands); + message.logAutovacuumMinDuration !== undefined && + (obj.logAutovacuumMinDuration = message.logAutovacuumMinDuration); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfig161c { + const message = { ...basePostgresqlconfig161c } as Postgresqlconfig161c; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? 
undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.debugParallelQuery = object.debugParallelQuery ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 
0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? 
undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? 
undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? 
undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.enableGroupByReordering = + object.enableGroupByReordering ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoPoolSize = object.geqoPoolSize ?? undefined; + message.geqoGenerations = object.geqoGenerations ?? undefined; + message.geqoSelectionBias = object.geqoSelectionBias ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + message.pgTrgmSimilarityThreshold = + object.pgTrgmSimilarityThreshold ?? undefined; + message.pgTrgmWordSimilarityThreshold = + object.pgTrgmWordSimilarityThreshold ?? undefined; + message.pgTrgmStrictWordSimilarityThreshold = + object.pgTrgmStrictWordSimilarityThreshold ?? undefined; + message.maxStandbyArchiveDelay = object.maxStandbyArchiveDelay ?? undefined; + message.sessionDurationTimeout = object.sessionDurationTimeout ?? 
undefined; + message.logReplicationCommands = object.logReplicationCommands ?? undefined; + message.logAutovacuumMinDuration = + object.logAutovacuumMinDuration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfig161c.$type, Postgresqlconfig161c); + +const basePostgresqlconfigset161c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16_1C", +}; + +export const Postgresqlconfigset161c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet16_1C" as const, + + encode( + message: Postgresqlconfigset161c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Postgresqlconfig161c.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Postgresqlconfig161c.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Postgresqlconfig161c.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfigset161c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePostgresqlconfigset161c, + } as Postgresqlconfigset161c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Postgresqlconfig161c.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Postgresqlconfig161c.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Postgresqlconfig161c.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfigset161c { + const message = { + ...basePostgresqlconfigset161c, + } as Postgresqlconfigset161c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig161c.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig161c.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig161c.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfigset161c): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Postgresqlconfig161c.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Postgresqlconfig161c.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Postgresqlconfig161c.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfigset161c { + const message = { + ...basePostgresqlconfigset161c, + } as Postgresqlconfigset161c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig161c.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig161c.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig161c.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfigset161c.$type, Postgresqlconfigset161c); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts index 0d6d7bc7..4d5ab520 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts @@ -2,6 +2,7 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; +import { BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -31,13 +32,19 @@ export interface Database { extensions: Extension[]; /** Name of the database template. */ templateDb: string; + /** + * Deletion Protection inhibits deletion of the database + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } export interface Extension { $type: "yandex.cloud.mdb.postgresql.v1.Extension"; /** * Name of the extension, e.g. `pg_trgm` or `pg_btree`. - * Extensions supported by Managed Service for PostgreSQL are [listed in the Developer's Guide](/docs/managed-postgresql/operations/cluster-extensions). + * Extensions supported by Managed Service for PostgreSQL are [listed in the Developer's Guide](/docs/managed-postgresql/operations/extensions/cluster-extensions). */ name: string; /** Version of the extension. */ @@ -67,6 +74,12 @@ export interface DatabaseSpec { extensions: Extension[]; /** Name of the PostgreSQL database template. 
*/ templateDb: string; + /** + * Deletion Protection inhibits deletion of the database + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } const baseDatabase: object = { @@ -107,6 +120,15 @@ export const Database = { if (message.templateDb !== "") { writer.uint32(58).string(message.templateDb); } + if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -139,6 +161,12 @@ export const Database = { case 7: message.templateDb = reader.string(); break; + case 8: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -176,6 +204,11 @@ export const Database = { object.templateDb !== undefined && object.templateDb !== null ? String(object.templateDb) : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : undefined; return message; }, @@ -194,6 +227,8 @@ export const Database = { obj.extensions = []; } message.templateDb !== undefined && (obj.templateDb = message.templateDb); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -207,6 +242,7 @@ export const Database = { message.extensions = object.extensions?.map((e) => Extension.fromPartial(e)) || []; message.templateDb = object.templateDb ?? ""; + message.deletionProtection = object.deletionProtection ?? 
undefined; return message; }, }; @@ -322,6 +358,15 @@ export const DatabaseSpec = { if (message.templateDb !== "") { writer.uint32(50).string(message.templateDb); } + if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -351,6 +396,12 @@ export const DatabaseSpec = { case 6: message.templateDb = reader.string(); break; + case 7: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -384,6 +435,11 @@ export const DatabaseSpec = { object.templateDb !== undefined && object.templateDb !== null ? String(object.templateDb) : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : undefined; return message; }, @@ -401,6 +457,8 @@ export const DatabaseSpec = { obj.extensions = []; } message.templateDb !== undefined && (obj.templateDb = message.templateDb); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -415,6 +473,7 @@ export const DatabaseSpec = { message.extensions = object.extensions?.map((e) => Extension.fromPartial(e)) || []; message.templateDb = object.templateDb ?? ""; + message.deletionProtection = object.deletionProtection ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts index 0f673122..4d4d2759 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts @@ -21,6 +21,7 @@ import { } from "../../../../../yandex/cloud/mdb/postgresql/v1/database"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; +import { BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -113,6 +114,12 @@ export interface UpdateDatabaseRequest { * Therefore, to disable an active extension you should simply send the list omitting this extension. */ extensions: Extension[]; + /** + * Deletion Protection inhibits deletion of the database + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } export interface UpdateDatabaseMetadata { @@ -592,6 +599,15 @@ export const UpdateDatabaseRequest = { for (const v of message.extensions) { Extension.encode(v!, writer.uint32(34).fork()).ldelim(); } + if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(50).fork() + ).ldelim(); + } return writer; }, @@ -621,6 +637,12 @@ export const UpdateDatabaseRequest = { case 4: message.extensions.push(Extension.decode(reader, reader.uint32())); break; + case 6: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -650,6 +672,11 @@ export const UpdateDatabaseRequest = { message.extensions = (object.extensions ?? 
[]).map((e: any) => Extension.fromJSON(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : undefined; return message; }, @@ -671,6 +698,8 @@ export const UpdateDatabaseRequest = { } else { obj.extensions = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -687,6 +716,7 @@ export const UpdateDatabaseRequest = { : undefined; message.extensions = object.extensions?.map((e) => Extension.fromPartial(e)) || []; + message.deletionProtection = object.deletionProtection ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag.ts new file mode 100644 index 00000000..b2942e9f --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag.ts @@ -0,0 +1,1643 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; + +export interface SessionState { + $type: "yandex.cloud.mdb.postgresql.v1.SessionState"; + /** Time of collecting statistics on sessions (in the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format). */ + time?: Date; + /** Host of the connected client. */ + host: string; + /** Server process ID. For client connections, this is a client connection ID. */ + pid: number; + /** Database ID. */ + database: string; + /** User ID. */ + user: string; + /** Application name on the connected client. */ + applicationName: string; + /** Time when a given process was started. For client connections, this is the time when the client connected to the server. 
*/ + backendStart?: Date; + /** + * Time when a transaction of a given process was started. Returns [NULL] if no transaction is active. + * + * If the currently active query is the first of its transaction, the value of this parameter is equal to the value of the [query_start] parameter. + */ + xactStart?: Date; + /** + * Time when the currently active query was started. + * + * If the [state] parameter does not take the value [active], the parameter returns the time when the lastest query was started. + */ + queryStart?: Date; + /** Time when the [state] parameter was last changed. */ + stateChange?: Date; + /** + * Type of event for which the backend is waiting. Such an event is called a wait event. A backend refers to the process that maintains the client connection. + * + * For the list of wait events, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#WAIT-EVENT-TABLE). If the backend is not waiting for any event, the parameter returns [NULL]. + */ + waitEventType: string; + /** + * Wait event name. + * + * For the list of such names, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#WAIT-EVENT-ACTIVITY-TABLE). If the backend is not waiting for any event, the parameter returns [NULL]. + */ + waitEvent: string; + /** Current backend state. For the list of possible values, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW). */ + state: string; + /** + * Text of the most recent query. + * + * If the [state] parameter takes the value [active], the parameter shows the currently executing query. For the rest of the states, the parameter shows the last query that was executed. By default, the query text is truncated to 1024 bytes. + */ + query: string; + /** Current backend type. 
For the list of possible values, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW). */ + backendType: string; + /** + * IP address of the connected client. + * + * The parameter returns [NULL] in the following cases: + * - The client is connected via a Unix socket on the server. + * - A given process is internal (for example, autovacuum). + */ + clientAddr: string; + /** Host name of the connected client (relevant for IP connections). */ + clientHostname: string; + /** + * TCP port number that the client is using for communication with the server. + * + * Returns [-1] if the client is connected via a Unix socket on the server. Returns [NULL] if a given process is internal (for example, autovacuum). + */ + clientPort: number; + /** Top-level transaction ID, if any. */ + backendXid: number; + /** Current [xmin horizon]. */ + backendXmin: number; + /** Process IDs that are blocking a given server process ID. */ + blockingPids: string; + /** Query ID. */ + queryId: string; +} + +export interface PrimaryKey { + $type: "yandex.cloud.mdb.postgresql.v1.PrimaryKey"; + /** Host of the connected client. */ + host: string; + /** User ID. */ + user: string; + /** Database ID. */ + database: string; + /** Returns [true] if a query is executed as a top-level SQL statement or if the [pg_stat_statements.track](https://www.postgresql.org/docs/current/pgstatstatements.html#id-1.11.7.41.9) parameter is set to the value [top]. */ + toplevel: boolean; + /** Query ID. */ + queryId: string; + /** Query planning ID. */ + planId: string; +} + +export interface QueryStats { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStats"; + /** Time of collecting statistics on planning and execution of queries. */ + time?: Date; + /** Statement text. */ + query: string; + /** Normalized query plan. */ + normalizedPlan: string; + /** Example of a query execution plan (without normalization). 
*/ + examplePlan: string; + /** + * Number of times that a query was planned. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning](https://www.postgresql.org/docs/current/pgstatstatements.html#id-1.11.7.41.9) parameter is enabled. + */ + plans: number; + /** + * Total time taken to plan a query, in milliseconds. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning] parameter is enabled. + */ + totalPlanTime: number; + /** + * Minimum time taken to plan a query, in milliseconds. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning] parameter is enabled. + */ + minPlanTime: number; + /** + * Maximum time taken to plan a query, in milliseconds. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning] parameter is enabled. + */ + maxPlanTime: number; + /** + * Average time taken to plan a query, in milliseconds. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning] parameter is enabled. + */ + meanPlanTime: number; + /** + * Population standard deviation of the time taken to plan a query, in milliseconds. + * + * The parameter returns a non-zero value if the [pg_stat_statements.track_planning] parameter is enabled. + */ + stddevPlanTime: number; + /** Number of times that a query was executed. */ + calls: number; + /** Total time taken to execute a query, in milliseconds. */ + totalTime: number; + /** Minimum time taken to execute a query, in milliseconds. */ + minTime: number; + /** Maximum time taken to execute a query, in milliseconds. */ + maxTime: number; + /** Average time taken to execute a query, in milliseconds. */ + meanTime: number; + /** Population standard deviation of the time taken to execute a query, in milliseconds. */ + stddevTime: number; + /** Number of retrieved or affected rows. */ + rows: number; + /** Number of shared blocks that are hit from cache. 
*/ + sharedBlksHit: number; + /** Number of read shared blocks. */ + sharedBlksRead: number; + /** Number of 'dirtied' shared blocks. */ + sharedBlksDirtied: number; + /** Number of written shared blocks. */ + sharedBlksWritten: number; + /** Number of local blocks that are hit from cache. */ + localBlksHit: number; + /** Number of read local blocks. */ + localBlksRead: number; + /** Number of 'dirtied' local blocks. */ + localBlksDirtied: number; + /** Number of written local blocks. */ + localBlksWritten: number; + /** Number of read temporary blocks. */ + tempBlksRead: number; + /** Number of written temporary blocks. */ + tempBlksWritten: number; + /** + * Time taken to read data blocks, in milliseconds. + * + * The parameter returns a non-zero value if the [track_io_timing](https://www.postgresql.org/docs/current/runtime-config-statistics.html#GUC-TRACK-IO-TIMING) parameter is enabled. + */ + blkReadTime: number; + /** + * Time taken to record data blocks, in milliseconds. + * + * The parameter returns a non-zero value if the [track_io_timing] parameter is enabled. + */ + blkWriteTime: number; + /** + * Time taken to read temporary data blocks, in milliseconds. + * + * The parameter returns a non-zero value if the [track_io_timing] parameter is enabled. + */ + tempBlkReadTime: number; + /** + * Time taken to record temporary data blocks, in milliseconds. + * + * The parameter returns a non-zero value if the [track_io_timing] parameter is enabled. + */ + tempBlkWriteTime: number; + /** Number of WAL records generated during a given period. */ + walRecords: number; + /** Number of WAL full page images generated during a given period. */ + walFpi: number; + /** Number of WAL logs generated during a given period, in bytes. */ + walBytes: number; + /** Number of JIT-compiled functions. */ + jitFunctions: number; + /** Time taken to generate JIT code, in milliseconds. */ + jitGenerationTime: number; + /** Number of times that functions have been inlined. 
*/ + jitInliningCount: number; + /** Time taken to inline functions, in milliseconds. */ + jitInliningTime: number; + /** Number of times that a query was optimized. */ + jitOptimizationCount: number; + /** Time taken to optimize a query, in milliseconds. */ + jitOptimizationTime: number; + /** Number of times that code was emitted. */ + jitEmissionCount: number; + /** Time taken to emit code. */ + jitEmissionTime: number; + /** Cost of receiving a response to a query before the first row of the response is issued. */ + startupCost: number; + /** Cost of receiving a response to a query when all the rows of the response are issued. */ + totalCost: number; + /** Expected number of rows that a given plan node should issue. */ + planRows: number; + /** Expected average size of rows that a given plan node should issue. */ + planWidth: number; + /** Number of bytes that the filesystem layer has read. */ + reads: number; + /** Number of bytes that the filesystem layer has written. */ + writes: number; + /** User CPU time used. */ + userTime: number; + /** System CPU time used. */ + systemTime: number; +} + +export interface QueryStatement { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStatement"; + /** Primary keys in tables with the statistics on planning and execution of queries. */ + key?: PrimaryKey; + /** Statistics on planning and execution of queries. 
*/ + stats?: QueryStats; +} + +const baseSessionState: object = { + $type: "yandex.cloud.mdb.postgresql.v1.SessionState", + host: "", + pid: 0, + database: "", + user: "", + applicationName: "", + waitEventType: "", + waitEvent: "", + state: "", + query: "", + backendType: "", + clientAddr: "", + clientHostname: "", + clientPort: 0, + backendXid: 0, + backendXmin: 0, + blockingPids: "", + queryId: "", +}; + +export const SessionState = { + $type: "yandex.cloud.mdb.postgresql.v1.SessionState" as const, + + encode( + message: SessionState, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.time !== undefined) { + Timestamp.encode( + toTimestamp(message.time), + writer.uint32(10).fork() + ).ldelim(); + } + if (message.host !== "") { + writer.uint32(18).string(message.host); + } + if (message.pid !== 0) { + writer.uint32(24).int64(message.pid); + } + if (message.database !== "") { + writer.uint32(34).string(message.database); + } + if (message.user !== "") { + writer.uint32(42).string(message.user); + } + if (message.applicationName !== "") { + writer.uint32(50).string(message.applicationName); + } + if (message.backendStart !== undefined) { + Timestamp.encode( + toTimestamp(message.backendStart), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.xactStart !== undefined) { + Timestamp.encode( + toTimestamp(message.xactStart), + writer.uint32(66).fork() + ).ldelim(); + } + if (message.queryStart !== undefined) { + Timestamp.encode( + toTimestamp(message.queryStart), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.stateChange !== undefined) { + Timestamp.encode( + toTimestamp(message.stateChange), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.waitEventType !== "") { + writer.uint32(90).string(message.waitEventType); + } + if (message.waitEvent !== "") { + writer.uint32(98).string(message.waitEvent); + } + if (message.state !== "") { + writer.uint32(106).string(message.state); + } + if (message.query !== "") { + 
writer.uint32(114).string(message.query); + } + if (message.backendType !== "") { + writer.uint32(122).string(message.backendType); + } + if (message.clientAddr !== "") { + writer.uint32(130).string(message.clientAddr); + } + if (message.clientHostname !== "") { + writer.uint32(138).string(message.clientHostname); + } + if (message.clientPort !== 0) { + writer.uint32(144).int64(message.clientPort); + } + if (message.backendXid !== 0) { + writer.uint32(152).int64(message.backendXid); + } + if (message.backendXmin !== 0) { + writer.uint32(160).int64(message.backendXmin); + } + if (message.blockingPids !== "") { + writer.uint32(178).string(message.blockingPids); + } + if (message.queryId !== "") { + writer.uint32(186).string(message.queryId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SessionState { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSessionState } as SessionState; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.time = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + message.host = reader.string(); + break; + case 3: + message.pid = longToNumber(reader.int64() as Long); + break; + case 4: + message.database = reader.string(); + break; + case 5: + message.user = reader.string(); + break; + case 6: + message.applicationName = reader.string(); + break; + case 7: + message.backendStart = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.xactStart = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 9: + message.queryStart = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.stateChange = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + 
message.waitEventType = reader.string(); + break; + case 12: + message.waitEvent = reader.string(); + break; + case 13: + message.state = reader.string(); + break; + case 14: + message.query = reader.string(); + break; + case 15: + message.backendType = reader.string(); + break; + case 16: + message.clientAddr = reader.string(); + break; + case 17: + message.clientHostname = reader.string(); + break; + case 18: + message.clientPort = longToNumber(reader.int64() as Long); + break; + case 19: + message.backendXid = longToNumber(reader.int64() as Long); + break; + case 20: + message.backendXmin = longToNumber(reader.int64() as Long); + break; + case 22: + message.blockingPids = reader.string(); + break; + case 23: + message.queryId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SessionState { + const message = { ...baseSessionState } as SessionState; + message.time = + object.time !== undefined && object.time !== null + ? fromJsonTimestamp(object.time) + : undefined; + message.host = + object.host !== undefined && object.host !== null + ? String(object.host) + : ""; + message.pid = + object.pid !== undefined && object.pid !== null ? Number(object.pid) : 0; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.user = + object.user !== undefined && object.user !== null + ? String(object.user) + : ""; + message.applicationName = + object.applicationName !== undefined && object.applicationName !== null + ? String(object.applicationName) + : ""; + message.backendStart = + object.backendStart !== undefined && object.backendStart !== null + ? fromJsonTimestamp(object.backendStart) + : undefined; + message.xactStart = + object.xactStart !== undefined && object.xactStart !== null + ? 
fromJsonTimestamp(object.xactStart) + : undefined; + message.queryStart = + object.queryStart !== undefined && object.queryStart !== null + ? fromJsonTimestamp(object.queryStart) + : undefined; + message.stateChange = + object.stateChange !== undefined && object.stateChange !== null + ? fromJsonTimestamp(object.stateChange) + : undefined; + message.waitEventType = + object.waitEventType !== undefined && object.waitEventType !== null + ? String(object.waitEventType) + : ""; + message.waitEvent = + object.waitEvent !== undefined && object.waitEvent !== null + ? String(object.waitEvent) + : ""; + message.state = + object.state !== undefined && object.state !== null + ? String(object.state) + : ""; + message.query = + object.query !== undefined && object.query !== null + ? String(object.query) + : ""; + message.backendType = + object.backendType !== undefined && object.backendType !== null + ? String(object.backendType) + : ""; + message.clientAddr = + object.clientAddr !== undefined && object.clientAddr !== null + ? String(object.clientAddr) + : ""; + message.clientHostname = + object.clientHostname !== undefined && object.clientHostname !== null + ? String(object.clientHostname) + : ""; + message.clientPort = + object.clientPort !== undefined && object.clientPort !== null + ? Number(object.clientPort) + : 0; + message.backendXid = + object.backendXid !== undefined && object.backendXid !== null + ? Number(object.backendXid) + : 0; + message.backendXmin = + object.backendXmin !== undefined && object.backendXmin !== null + ? Number(object.backendXmin) + : 0; + message.blockingPids = + object.blockingPids !== undefined && object.blockingPids !== null + ? String(object.blockingPids) + : ""; + message.queryId = + object.queryId !== undefined && object.queryId !== null + ? 
String(object.queryId) + : ""; + return message; + }, + + toJSON(message: SessionState): unknown { + const obj: any = {}; + message.time !== undefined && (obj.time = message.time.toISOString()); + message.host !== undefined && (obj.host = message.host); + message.pid !== undefined && (obj.pid = Math.round(message.pid)); + message.database !== undefined && (obj.database = message.database); + message.user !== undefined && (obj.user = message.user); + message.applicationName !== undefined && + (obj.applicationName = message.applicationName); + message.backendStart !== undefined && + (obj.backendStart = message.backendStart.toISOString()); + message.xactStart !== undefined && + (obj.xactStart = message.xactStart.toISOString()); + message.queryStart !== undefined && + (obj.queryStart = message.queryStart.toISOString()); + message.stateChange !== undefined && + (obj.stateChange = message.stateChange.toISOString()); + message.waitEventType !== undefined && + (obj.waitEventType = message.waitEventType); + message.waitEvent !== undefined && (obj.waitEvent = message.waitEvent); + message.state !== undefined && (obj.state = message.state); + message.query !== undefined && (obj.query = message.query); + message.backendType !== undefined && + (obj.backendType = message.backendType); + message.clientAddr !== undefined && (obj.clientAddr = message.clientAddr); + message.clientHostname !== undefined && + (obj.clientHostname = message.clientHostname); + message.clientPort !== undefined && + (obj.clientPort = Math.round(message.clientPort)); + message.backendXid !== undefined && + (obj.backendXid = Math.round(message.backendXid)); + message.backendXmin !== undefined && + (obj.backendXmin = Math.round(message.backendXmin)); + message.blockingPids !== undefined && + (obj.blockingPids = message.blockingPids); + message.queryId !== undefined && (obj.queryId = message.queryId); + return obj; + }, + + fromPartial, I>>( + object: I + ): SessionState { + const message = { 
...baseSessionState } as SessionState; + message.time = object.time ?? undefined; + message.host = object.host ?? ""; + message.pid = object.pid ?? 0; + message.database = object.database ?? ""; + message.user = object.user ?? ""; + message.applicationName = object.applicationName ?? ""; + message.backendStart = object.backendStart ?? undefined; + message.xactStart = object.xactStart ?? undefined; + message.queryStart = object.queryStart ?? undefined; + message.stateChange = object.stateChange ?? undefined; + message.waitEventType = object.waitEventType ?? ""; + message.waitEvent = object.waitEvent ?? ""; + message.state = object.state ?? ""; + message.query = object.query ?? ""; + message.backendType = object.backendType ?? ""; + message.clientAddr = object.clientAddr ?? ""; + message.clientHostname = object.clientHostname ?? ""; + message.clientPort = object.clientPort ?? 0; + message.backendXid = object.backendXid ?? 0; + message.backendXmin = object.backendXmin ?? 0; + message.blockingPids = object.blockingPids ?? ""; + message.queryId = object.queryId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(SessionState.$type, SessionState); + +const basePrimaryKey: object = { + $type: "yandex.cloud.mdb.postgresql.v1.PrimaryKey", + host: "", + user: "", + database: "", + toplevel: false, + queryId: "", + planId: "", +}; + +export const PrimaryKey = { + $type: "yandex.cloud.mdb.postgresql.v1.PrimaryKey" as const, + + encode( + message: PrimaryKey, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.host !== "") { + writer.uint32(10).string(message.host); + } + if (message.user !== "") { + writer.uint32(18).string(message.user); + } + if (message.database !== "") { + writer.uint32(34).string(message.database); + } + if (message.toplevel === true) { + writer.uint32(40).bool(message.toplevel); + } + if (message.queryId !== "") { + writer.uint32(50).string(message.queryId); + } + if (message.planId !== "") { + writer.uint32(58).string(message.planId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PrimaryKey { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePrimaryKey } as PrimaryKey; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.host = reader.string(); + break; + case 2: + message.user = reader.string(); + break; + case 4: + message.database = reader.string(); + break; + case 5: + message.toplevel = reader.bool(); + break; + case 6: + message.queryId = reader.string(); + break; + case 7: + message.planId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PrimaryKey { + const message = { ...basePrimaryKey } as PrimaryKey; + message.host = + object.host !== undefined && object.host !== null + ? String(object.host) + : ""; + message.user = + object.user !== undefined && object.user !== null + ? 
String(object.user) + : ""; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.toplevel = + object.toplevel !== undefined && object.toplevel !== null + ? Boolean(object.toplevel) + : false; + message.queryId = + object.queryId !== undefined && object.queryId !== null + ? String(object.queryId) + : ""; + message.planId = + object.planId !== undefined && object.planId !== null + ? String(object.planId) + : ""; + return message; + }, + + toJSON(message: PrimaryKey): unknown { + const obj: any = {}; + message.host !== undefined && (obj.host = message.host); + message.user !== undefined && (obj.user = message.user); + message.database !== undefined && (obj.database = message.database); + message.toplevel !== undefined && (obj.toplevel = message.toplevel); + message.queryId !== undefined && (obj.queryId = message.queryId); + message.planId !== undefined && (obj.planId = message.planId); + return obj; + }, + + fromPartial, I>>( + object: I + ): PrimaryKey { + const message = { ...basePrimaryKey } as PrimaryKey; + message.host = object.host ?? ""; + message.user = object.user ?? ""; + message.database = object.database ?? ""; + message.toplevel = object.toplevel ?? false; + message.queryId = object.queryId ?? ""; + message.planId = object.planId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(PrimaryKey.$type, PrimaryKey); + +const baseQueryStats: object = { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStats", + query: "", + normalizedPlan: "", + examplePlan: "", + plans: 0, + totalPlanTime: 0, + minPlanTime: 0, + maxPlanTime: 0, + meanPlanTime: 0, + stddevPlanTime: 0, + calls: 0, + totalTime: 0, + minTime: 0, + maxTime: 0, + meanTime: 0, + stddevTime: 0, + rows: 0, + sharedBlksHit: 0, + sharedBlksRead: 0, + sharedBlksDirtied: 0, + sharedBlksWritten: 0, + localBlksHit: 0, + localBlksRead: 0, + localBlksDirtied: 0, + localBlksWritten: 0, + tempBlksRead: 0, + tempBlksWritten: 0, + blkReadTime: 0, + blkWriteTime: 0, + tempBlkReadTime: 0, + tempBlkWriteTime: 0, + walRecords: 0, + walFpi: 0, + walBytes: 0, + jitFunctions: 0, + jitGenerationTime: 0, + jitInliningCount: 0, + jitInliningTime: 0, + jitOptimizationCount: 0, + jitOptimizationTime: 0, + jitEmissionCount: 0, + jitEmissionTime: 0, + startupCost: 0, + totalCost: 0, + planRows: 0, + planWidth: 0, + reads: 0, + writes: 0, + userTime: 0, + systemTime: 0, +}; + +export const QueryStats = { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStats" as const, + + encode( + message: QueryStats, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.time !== undefined) { + Timestamp.encode( + toTimestamp(message.time), + writer.uint32(10).fork() + ).ldelim(); + } + if (message.query !== "") { + writer.uint32(18).string(message.query); + } + if (message.normalizedPlan !== "") { + writer.uint32(26).string(message.normalizedPlan); + } + if (message.examplePlan !== "") { + writer.uint32(34).string(message.examplePlan); + } + if (message.plans !== 0) { + writer.uint32(40).int64(message.plans); + } + if (message.totalPlanTime !== 0) { + writer.uint32(49).double(message.totalPlanTime); + } + if (message.minPlanTime !== 0) { + writer.uint32(57).double(message.minPlanTime); + } + if (message.maxPlanTime !== 0) { + 
writer.uint32(65).double(message.maxPlanTime); + } + if (message.meanPlanTime !== 0) { + writer.uint32(73).double(message.meanPlanTime); + } + if (message.stddevPlanTime !== 0) { + writer.uint32(81).double(message.stddevPlanTime); + } + if (message.calls !== 0) { + writer.uint32(88).int64(message.calls); + } + if (message.totalTime !== 0) { + writer.uint32(97).double(message.totalTime); + } + if (message.minTime !== 0) { + writer.uint32(105).double(message.minTime); + } + if (message.maxTime !== 0) { + writer.uint32(113).double(message.maxTime); + } + if (message.meanTime !== 0) { + writer.uint32(121).double(message.meanTime); + } + if (message.stddevTime !== 0) { + writer.uint32(129).double(message.stddevTime); + } + if (message.rows !== 0) { + writer.uint32(136).int64(message.rows); + } + if (message.sharedBlksHit !== 0) { + writer.uint32(144).int64(message.sharedBlksHit); + } + if (message.sharedBlksRead !== 0) { + writer.uint32(152).int64(message.sharedBlksRead); + } + if (message.sharedBlksDirtied !== 0) { + writer.uint32(160).int64(message.sharedBlksDirtied); + } + if (message.sharedBlksWritten !== 0) { + writer.uint32(168).int64(message.sharedBlksWritten); + } + if (message.localBlksHit !== 0) { + writer.uint32(176).int64(message.localBlksHit); + } + if (message.localBlksRead !== 0) { + writer.uint32(184).int64(message.localBlksRead); + } + if (message.localBlksDirtied !== 0) { + writer.uint32(192).int64(message.localBlksDirtied); + } + if (message.localBlksWritten !== 0) { + writer.uint32(200).int64(message.localBlksWritten); + } + if (message.tempBlksRead !== 0) { + writer.uint32(208).int64(message.tempBlksRead); + } + if (message.tempBlksWritten !== 0) { + writer.uint32(216).int64(message.tempBlksWritten); + } + if (message.blkReadTime !== 0) { + writer.uint32(225).double(message.blkReadTime); + } + if (message.blkWriteTime !== 0) { + writer.uint32(233).double(message.blkWriteTime); + } + if (message.tempBlkReadTime !== 0) { + 
writer.uint32(241).double(message.tempBlkReadTime); + } + if (message.tempBlkWriteTime !== 0) { + writer.uint32(249).double(message.tempBlkWriteTime); + } + if (message.walRecords !== 0) { + writer.uint32(256).int64(message.walRecords); + } + if (message.walFpi !== 0) { + writer.uint32(264).int64(message.walFpi); + } + if (message.walBytes !== 0) { + writer.uint32(272).int64(message.walBytes); + } + if (message.jitFunctions !== 0) { + writer.uint32(280).int64(message.jitFunctions); + } + if (message.jitGenerationTime !== 0) { + writer.uint32(289).double(message.jitGenerationTime); + } + if (message.jitInliningCount !== 0) { + writer.uint32(296).int64(message.jitInliningCount); + } + if (message.jitInliningTime !== 0) { + writer.uint32(305).double(message.jitInliningTime); + } + if (message.jitOptimizationCount !== 0) { + writer.uint32(312).int64(message.jitOptimizationCount); + } + if (message.jitOptimizationTime !== 0) { + writer.uint32(321).double(message.jitOptimizationTime); + } + if (message.jitEmissionCount !== 0) { + writer.uint32(328).int64(message.jitEmissionCount); + } + if (message.jitEmissionTime !== 0) { + writer.uint32(337).double(message.jitEmissionTime); + } + if (message.startupCost !== 0) { + writer.uint32(344).int64(message.startupCost); + } + if (message.totalCost !== 0) { + writer.uint32(352).int64(message.totalCost); + } + if (message.planRows !== 0) { + writer.uint32(360).int64(message.planRows); + } + if (message.planWidth !== 0) { + writer.uint32(368).int64(message.planWidth); + } + if (message.reads !== 0) { + writer.uint32(376).int64(message.reads); + } + if (message.writes !== 0) { + writer.uint32(384).int64(message.writes); + } + if (message.userTime !== 0) { + writer.uint32(393).double(message.userTime); + } + if (message.systemTime !== 0) { + writer.uint32(401).double(message.systemTime); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryStats { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryStats } as QueryStats; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.time = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + message.query = reader.string(); + break; + case 3: + message.normalizedPlan = reader.string(); + break; + case 4: + message.examplePlan = reader.string(); + break; + case 5: + message.plans = longToNumber(reader.int64() as Long); + break; + case 6: + message.totalPlanTime = reader.double(); + break; + case 7: + message.minPlanTime = reader.double(); + break; + case 8: + message.maxPlanTime = reader.double(); + break; + case 9: + message.meanPlanTime = reader.double(); + break; + case 10: + message.stddevPlanTime = reader.double(); + break; + case 11: + message.calls = longToNumber(reader.int64() as Long); + break; + case 12: + message.totalTime = reader.double(); + break; + case 13: + message.minTime = reader.double(); + break; + case 14: + message.maxTime = reader.double(); + break; + case 15: + message.meanTime = reader.double(); + break; + case 16: + message.stddevTime = reader.double(); + break; + case 17: + message.rows = longToNumber(reader.int64() as Long); + break; + case 18: + message.sharedBlksHit = longToNumber(reader.int64() as Long); + break; + case 19: + message.sharedBlksRead = longToNumber(reader.int64() as Long); + break; + case 20: + message.sharedBlksDirtied = longToNumber(reader.int64() as Long); + break; + case 21: + message.sharedBlksWritten = longToNumber(reader.int64() as Long); + break; + case 22: + message.localBlksHit = longToNumber(reader.int64() as Long); + break; + case 23: + message.localBlksRead = longToNumber(reader.int64() as Long); + break; + case 24: + message.localBlksDirtied = longToNumber(reader.int64() as Long); + break; + case 25: + message.localBlksWritten = 
longToNumber(reader.int64() as Long); + break; + case 26: + message.tempBlksRead = longToNumber(reader.int64() as Long); + break; + case 27: + message.tempBlksWritten = longToNumber(reader.int64() as Long); + break; + case 28: + message.blkReadTime = reader.double(); + break; + case 29: + message.blkWriteTime = reader.double(); + break; + case 30: + message.tempBlkReadTime = reader.double(); + break; + case 31: + message.tempBlkWriteTime = reader.double(); + break; + case 32: + message.walRecords = longToNumber(reader.int64() as Long); + break; + case 33: + message.walFpi = longToNumber(reader.int64() as Long); + break; + case 34: + message.walBytes = longToNumber(reader.int64() as Long); + break; + case 35: + message.jitFunctions = longToNumber(reader.int64() as Long); + break; + case 36: + message.jitGenerationTime = reader.double(); + break; + case 37: + message.jitInliningCount = longToNumber(reader.int64() as Long); + break; + case 38: + message.jitInliningTime = reader.double(); + break; + case 39: + message.jitOptimizationCount = longToNumber(reader.int64() as Long); + break; + case 40: + message.jitOptimizationTime = reader.double(); + break; + case 41: + message.jitEmissionCount = longToNumber(reader.int64() as Long); + break; + case 42: + message.jitEmissionTime = reader.double(); + break; + case 43: + message.startupCost = longToNumber(reader.int64() as Long); + break; + case 44: + message.totalCost = longToNumber(reader.int64() as Long); + break; + case 45: + message.planRows = longToNumber(reader.int64() as Long); + break; + case 46: + message.planWidth = longToNumber(reader.int64() as Long); + break; + case 47: + message.reads = longToNumber(reader.int64() as Long); + break; + case 48: + message.writes = longToNumber(reader.int64() as Long); + break; + case 49: + message.userTime = reader.double(); + break; + case 50: + message.systemTime = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + 
fromJSON(object: any): QueryStats { + const message = { ...baseQueryStats } as QueryStats; + message.time = + object.time !== undefined && object.time !== null + ? fromJsonTimestamp(object.time) + : undefined; + message.query = + object.query !== undefined && object.query !== null + ? String(object.query) + : ""; + message.normalizedPlan = + object.normalizedPlan !== undefined && object.normalizedPlan !== null + ? String(object.normalizedPlan) + : ""; + message.examplePlan = + object.examplePlan !== undefined && object.examplePlan !== null + ? String(object.examplePlan) + : ""; + message.plans = + object.plans !== undefined && object.plans !== null + ? Number(object.plans) + : 0; + message.totalPlanTime = + object.totalPlanTime !== undefined && object.totalPlanTime !== null + ? Number(object.totalPlanTime) + : 0; + message.minPlanTime = + object.minPlanTime !== undefined && object.minPlanTime !== null + ? Number(object.minPlanTime) + : 0; + message.maxPlanTime = + object.maxPlanTime !== undefined && object.maxPlanTime !== null + ? Number(object.maxPlanTime) + : 0; + message.meanPlanTime = + object.meanPlanTime !== undefined && object.meanPlanTime !== null + ? Number(object.meanPlanTime) + : 0; + message.stddevPlanTime = + object.stddevPlanTime !== undefined && object.stddevPlanTime !== null + ? Number(object.stddevPlanTime) + : 0; + message.calls = + object.calls !== undefined && object.calls !== null + ? Number(object.calls) + : 0; + message.totalTime = + object.totalTime !== undefined && object.totalTime !== null + ? Number(object.totalTime) + : 0; + message.minTime = + object.minTime !== undefined && object.minTime !== null + ? Number(object.minTime) + : 0; + message.maxTime = + object.maxTime !== undefined && object.maxTime !== null + ? Number(object.maxTime) + : 0; + message.meanTime = + object.meanTime !== undefined && object.meanTime !== null + ? 
Number(object.meanTime) + : 0; + message.stddevTime = + object.stddevTime !== undefined && object.stddevTime !== null + ? Number(object.stddevTime) + : 0; + message.rows = + object.rows !== undefined && object.rows !== null + ? Number(object.rows) + : 0; + message.sharedBlksHit = + object.sharedBlksHit !== undefined && object.sharedBlksHit !== null + ? Number(object.sharedBlksHit) + : 0; + message.sharedBlksRead = + object.sharedBlksRead !== undefined && object.sharedBlksRead !== null + ? Number(object.sharedBlksRead) + : 0; + message.sharedBlksDirtied = + object.sharedBlksDirtied !== undefined && + object.sharedBlksDirtied !== null + ? Number(object.sharedBlksDirtied) + : 0; + message.sharedBlksWritten = + object.sharedBlksWritten !== undefined && + object.sharedBlksWritten !== null + ? Number(object.sharedBlksWritten) + : 0; + message.localBlksHit = + object.localBlksHit !== undefined && object.localBlksHit !== null + ? Number(object.localBlksHit) + : 0; + message.localBlksRead = + object.localBlksRead !== undefined && object.localBlksRead !== null + ? Number(object.localBlksRead) + : 0; + message.localBlksDirtied = + object.localBlksDirtied !== undefined && object.localBlksDirtied !== null + ? Number(object.localBlksDirtied) + : 0; + message.localBlksWritten = + object.localBlksWritten !== undefined && object.localBlksWritten !== null + ? Number(object.localBlksWritten) + : 0; + message.tempBlksRead = + object.tempBlksRead !== undefined && object.tempBlksRead !== null + ? Number(object.tempBlksRead) + : 0; + message.tempBlksWritten = + object.tempBlksWritten !== undefined && object.tempBlksWritten !== null + ? Number(object.tempBlksWritten) + : 0; + message.blkReadTime = + object.blkReadTime !== undefined && object.blkReadTime !== null + ? Number(object.blkReadTime) + : 0; + message.blkWriteTime = + object.blkWriteTime !== undefined && object.blkWriteTime !== null + ? 
Number(object.blkWriteTime) + : 0; + message.tempBlkReadTime = + object.tempBlkReadTime !== undefined && object.tempBlkReadTime !== null + ? Number(object.tempBlkReadTime) + : 0; + message.tempBlkWriteTime = + object.tempBlkWriteTime !== undefined && object.tempBlkWriteTime !== null + ? Number(object.tempBlkWriteTime) + : 0; + message.walRecords = + object.walRecords !== undefined && object.walRecords !== null + ? Number(object.walRecords) + : 0; + message.walFpi = + object.walFpi !== undefined && object.walFpi !== null + ? Number(object.walFpi) + : 0; + message.walBytes = + object.walBytes !== undefined && object.walBytes !== null + ? Number(object.walBytes) + : 0; + message.jitFunctions = + object.jitFunctions !== undefined && object.jitFunctions !== null + ? Number(object.jitFunctions) + : 0; + message.jitGenerationTime = + object.jitGenerationTime !== undefined && + object.jitGenerationTime !== null + ? Number(object.jitGenerationTime) + : 0; + message.jitInliningCount = + object.jitInliningCount !== undefined && object.jitInliningCount !== null + ? Number(object.jitInliningCount) + : 0; + message.jitInliningTime = + object.jitInliningTime !== undefined && object.jitInliningTime !== null + ? Number(object.jitInliningTime) + : 0; + message.jitOptimizationCount = + object.jitOptimizationCount !== undefined && + object.jitOptimizationCount !== null + ? Number(object.jitOptimizationCount) + : 0; + message.jitOptimizationTime = + object.jitOptimizationTime !== undefined && + object.jitOptimizationTime !== null + ? Number(object.jitOptimizationTime) + : 0; + message.jitEmissionCount = + object.jitEmissionCount !== undefined && object.jitEmissionCount !== null + ? Number(object.jitEmissionCount) + : 0; + message.jitEmissionTime = + object.jitEmissionTime !== undefined && object.jitEmissionTime !== null + ? Number(object.jitEmissionTime) + : 0; + message.startupCost = + object.startupCost !== undefined && object.startupCost !== null + ? 
Number(object.startupCost) + : 0; + message.totalCost = + object.totalCost !== undefined && object.totalCost !== null + ? Number(object.totalCost) + : 0; + message.planRows = + object.planRows !== undefined && object.planRows !== null + ? Number(object.planRows) + : 0; + message.planWidth = + object.planWidth !== undefined && object.planWidth !== null + ? Number(object.planWidth) + : 0; + message.reads = + object.reads !== undefined && object.reads !== null + ? Number(object.reads) + : 0; + message.writes = + object.writes !== undefined && object.writes !== null + ? Number(object.writes) + : 0; + message.userTime = + object.userTime !== undefined && object.userTime !== null + ? Number(object.userTime) + : 0; + message.systemTime = + object.systemTime !== undefined && object.systemTime !== null + ? Number(object.systemTime) + : 0; + return message; + }, + + toJSON(message: QueryStats): unknown { + const obj: any = {}; + message.time !== undefined && (obj.time = message.time.toISOString()); + message.query !== undefined && (obj.query = message.query); + message.normalizedPlan !== undefined && + (obj.normalizedPlan = message.normalizedPlan); + message.examplePlan !== undefined && + (obj.examplePlan = message.examplePlan); + message.plans !== undefined && (obj.plans = Math.round(message.plans)); + message.totalPlanTime !== undefined && + (obj.totalPlanTime = message.totalPlanTime); + message.minPlanTime !== undefined && + (obj.minPlanTime = message.minPlanTime); + message.maxPlanTime !== undefined && + (obj.maxPlanTime = message.maxPlanTime); + message.meanPlanTime !== undefined && + (obj.meanPlanTime = message.meanPlanTime); + message.stddevPlanTime !== undefined && + (obj.stddevPlanTime = message.stddevPlanTime); + message.calls !== undefined && (obj.calls = Math.round(message.calls)); + message.totalTime !== undefined && (obj.totalTime = message.totalTime); + message.minTime !== undefined && (obj.minTime = message.minTime); + message.maxTime !== undefined && 
(obj.maxTime = message.maxTime); + message.meanTime !== undefined && (obj.meanTime = message.meanTime); + message.stddevTime !== undefined && (obj.stddevTime = message.stddevTime); + message.rows !== undefined && (obj.rows = Math.round(message.rows)); + message.sharedBlksHit !== undefined && + (obj.sharedBlksHit = Math.round(message.sharedBlksHit)); + message.sharedBlksRead !== undefined && + (obj.sharedBlksRead = Math.round(message.sharedBlksRead)); + message.sharedBlksDirtied !== undefined && + (obj.sharedBlksDirtied = Math.round(message.sharedBlksDirtied)); + message.sharedBlksWritten !== undefined && + (obj.sharedBlksWritten = Math.round(message.sharedBlksWritten)); + message.localBlksHit !== undefined && + (obj.localBlksHit = Math.round(message.localBlksHit)); + message.localBlksRead !== undefined && + (obj.localBlksRead = Math.round(message.localBlksRead)); + message.localBlksDirtied !== undefined && + (obj.localBlksDirtied = Math.round(message.localBlksDirtied)); + message.localBlksWritten !== undefined && + (obj.localBlksWritten = Math.round(message.localBlksWritten)); + message.tempBlksRead !== undefined && + (obj.tempBlksRead = Math.round(message.tempBlksRead)); + message.tempBlksWritten !== undefined && + (obj.tempBlksWritten = Math.round(message.tempBlksWritten)); + message.blkReadTime !== undefined && + (obj.blkReadTime = message.blkReadTime); + message.blkWriteTime !== undefined && + (obj.blkWriteTime = message.blkWriteTime); + message.tempBlkReadTime !== undefined && + (obj.tempBlkReadTime = message.tempBlkReadTime); + message.tempBlkWriteTime !== undefined && + (obj.tempBlkWriteTime = message.tempBlkWriteTime); + message.walRecords !== undefined && + (obj.walRecords = Math.round(message.walRecords)); + message.walFpi !== undefined && (obj.walFpi = Math.round(message.walFpi)); + message.walBytes !== undefined && + (obj.walBytes = Math.round(message.walBytes)); + message.jitFunctions !== undefined && + (obj.jitFunctions = 
Math.round(message.jitFunctions)); + message.jitGenerationTime !== undefined && + (obj.jitGenerationTime = message.jitGenerationTime); + message.jitInliningCount !== undefined && + (obj.jitInliningCount = Math.round(message.jitInliningCount)); + message.jitInliningTime !== undefined && + (obj.jitInliningTime = message.jitInliningTime); + message.jitOptimizationCount !== undefined && + (obj.jitOptimizationCount = Math.round(message.jitOptimizationCount)); + message.jitOptimizationTime !== undefined && + (obj.jitOptimizationTime = message.jitOptimizationTime); + message.jitEmissionCount !== undefined && + (obj.jitEmissionCount = Math.round(message.jitEmissionCount)); + message.jitEmissionTime !== undefined && + (obj.jitEmissionTime = message.jitEmissionTime); + message.startupCost !== undefined && + (obj.startupCost = Math.round(message.startupCost)); + message.totalCost !== undefined && + (obj.totalCost = Math.round(message.totalCost)); + message.planRows !== undefined && + (obj.planRows = Math.round(message.planRows)); + message.planWidth !== undefined && + (obj.planWidth = Math.round(message.planWidth)); + message.reads !== undefined && (obj.reads = Math.round(message.reads)); + message.writes !== undefined && (obj.writes = Math.round(message.writes)); + message.userTime !== undefined && (obj.userTime = message.userTime); + message.systemTime !== undefined && (obj.systemTime = message.systemTime); + return obj; + }, + + fromPartial, I>>( + object: I + ): QueryStats { + const message = { ...baseQueryStats } as QueryStats; + message.time = object.time ?? undefined; + message.query = object.query ?? ""; + message.normalizedPlan = object.normalizedPlan ?? ""; + message.examplePlan = object.examplePlan ?? ""; + message.plans = object.plans ?? 0; + message.totalPlanTime = object.totalPlanTime ?? 0; + message.minPlanTime = object.minPlanTime ?? 0; + message.maxPlanTime = object.maxPlanTime ?? 0; + message.meanPlanTime = object.meanPlanTime ?? 
0; + message.stddevPlanTime = object.stddevPlanTime ?? 0; + message.calls = object.calls ?? 0; + message.totalTime = object.totalTime ?? 0; + message.minTime = object.minTime ?? 0; + message.maxTime = object.maxTime ?? 0; + message.meanTime = object.meanTime ?? 0; + message.stddevTime = object.stddevTime ?? 0; + message.rows = object.rows ?? 0; + message.sharedBlksHit = object.sharedBlksHit ?? 0; + message.sharedBlksRead = object.sharedBlksRead ?? 0; + message.sharedBlksDirtied = object.sharedBlksDirtied ?? 0; + message.sharedBlksWritten = object.sharedBlksWritten ?? 0; + message.localBlksHit = object.localBlksHit ?? 0; + message.localBlksRead = object.localBlksRead ?? 0; + message.localBlksDirtied = object.localBlksDirtied ?? 0; + message.localBlksWritten = object.localBlksWritten ?? 0; + message.tempBlksRead = object.tempBlksRead ?? 0; + message.tempBlksWritten = object.tempBlksWritten ?? 0; + message.blkReadTime = object.blkReadTime ?? 0; + message.blkWriteTime = object.blkWriteTime ?? 0; + message.tempBlkReadTime = object.tempBlkReadTime ?? 0; + message.tempBlkWriteTime = object.tempBlkWriteTime ?? 0; + message.walRecords = object.walRecords ?? 0; + message.walFpi = object.walFpi ?? 0; + message.walBytes = object.walBytes ?? 0; + message.jitFunctions = object.jitFunctions ?? 0; + message.jitGenerationTime = object.jitGenerationTime ?? 0; + message.jitInliningCount = object.jitInliningCount ?? 0; + message.jitInliningTime = object.jitInliningTime ?? 0; + message.jitOptimizationCount = object.jitOptimizationCount ?? 0; + message.jitOptimizationTime = object.jitOptimizationTime ?? 0; + message.jitEmissionCount = object.jitEmissionCount ?? 0; + message.jitEmissionTime = object.jitEmissionTime ?? 0; + message.startupCost = object.startupCost ?? 0; + message.totalCost = object.totalCost ?? 0; + message.planRows = object.planRows ?? 0; + message.planWidth = object.planWidth ?? 0; + message.reads = object.reads ?? 0; + message.writes = object.writes ?? 
0; + message.userTime = object.userTime ?? 0; + message.systemTime = object.systemTime ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(QueryStats.$type, QueryStats); + +const baseQueryStatement: object = { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStatement", +}; + +export const QueryStatement = { + $type: "yandex.cloud.mdb.postgresql.v1.QueryStatement" as const, + + encode( + message: QueryStatement, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== undefined) { + PrimaryKey.encode(message.key, writer.uint32(10).fork()).ldelim(); + } + if (message.stats !== undefined) { + QueryStats.encode(message.stats, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryStatement { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryStatement } as QueryStatement; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = PrimaryKey.decode(reader, reader.uint32()); + break; + case 2: + message.stats = QueryStats.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryStatement { + const message = { ...baseQueryStatement } as QueryStatement; + message.key = + object.key !== undefined && object.key !== null + ? PrimaryKey.fromJSON(object.key) + : undefined; + message.stats = + object.stats !== undefined && object.stats !== null + ? QueryStats.fromJSON(object.stats) + : undefined; + return message; + }, + + toJSON(message: QueryStatement): unknown { + const obj: any = {}; + message.key !== undefined && + (obj.key = message.key ? PrimaryKey.toJSON(message.key) : undefined); + message.stats !== undefined && + (obj.stats = message.stats + ? 
QueryStats.toJSON(message.stats) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): QueryStatement { + const message = { ...baseQueryStatement } as QueryStatement; + message.key = + object.key !== undefined && object.key !== null + ? PrimaryKey.fromPartial(object.key) + : undefined; + message.stats = + object.stats !== undefined && object.stats !== null + ? QueryStats.fromPartial(object.stats) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryStatement.$type, QueryStatement); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag_service.ts new file mode 100644 index 00000000..53b893f2 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/perf_diag_service.ts @@ -0,0 +1,722 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; +import { + SessionState, + QueryStatement, +} from "../../../../../yandex/cloud/mdb/postgresql/v1/perf_diag"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; + +export interface ListRawStatementsRequest { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsRequest"; + /** + * ID of a 
PostgreSQL cluster to request query statistics for. + * + * To get a PostgreSQL cluster ID, use the [ClusterService.List] method. + */ + clusterId: string; + /** Beginning of the period for which you need to request data (in the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format). */ + fromTime?: Date; + /** End of the period for which you need to request data (in the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format). */ + toTime?: Date; + /** The maximum number of results per page to return. If the number of the results is larger than [page_size], the service returns [ListRawStatementsResponse.next_page_token]. You can use it to get the next page of the results in subsequent requests. */ + pageSize: number; + /** Page token. To get the next page of results, set [page_token] to the [ListRawStatementsResponse.next_page_token] returned by the previous SQL statement list request. */ + pageToken: string; +} + +export interface ListRawSessionStatesRequest { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesRequest"; + /** + * ID of a PostgreSQL cluster to request session statistics for. + * + * To get a PostgreSQL cluster ID, use the [ClusterService.List] request. + */ + clusterId: string; + /** Beginning of the period for which you need to request data (in the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format). */ + fromTime?: Date; + /** End of the period for which you need to request data (in the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format). */ + toTime?: Date; + /** The maximum number of results per page to return. If the number of the results is larger than [page_size], the service returns [ListRawSessionStatesResponse.next_page_token]. You can use it to get the next page of the results in subsequent requests. */ + pageSize: number; + /** Page token. To get the next page of results, set [page_token] to the [ListRawSessionStatesResponse.next_page_token] returned by the previous PostgreSQL session list request. 
*/ + pageToken: string; +} + +export interface ListRawSessionStatesResponse { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesResponse"; + /** List of PostgreSQL sessions. */ + sessionStates: SessionState[]; + /** This token allows you to get the next page of results when requesting the PostgreSQL session list. If the number of the results is larger than [ListRawSessionStatesRequest.page_size], use the [next_page_token] as the value for the [ListRawSessionStatesRequest.page_token] parameter in the next request. Each subsequent request will have its own [next_page_token] to continue paging through the results. */ + nextPageToken: string; +} + +export interface ListRawStatementsResponse { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsResponse"; + /** List of SQL statements (queries). */ + statements: QueryStatement[]; + /** This token allows you to get the next page of results when requesting the PostgreSQL session list. If the number of the results is larger than [ListRawStatementsRequest.page_size], use the [next_page_token] as the value for the [ListRawStatementsRequest.page_token] parameter in the next request. Each subsequent request will have its own [next_page_token] to continue paging through the results. 
*/ + nextPageToken: string; +} + +const baseListRawStatementsRequest: object = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListRawStatementsRequest = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsRequest" as const, + + encode( + message: ListRawStatementsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.pageSize !== 0) { + writer.uint32(32).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(42).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListRawStatementsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListRawStatementsRequest, + } as ListRawStatementsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 5: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListRawStatementsRequest { + const message = { + ...baseListRawStatementsRequest, + } as ListRawStatementsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListRawStatementsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListRawStatementsRequest { + const message = { + ...baseListRawStatementsRequest, + } as ListRawStatementsRequest; + message.clusterId = object.clusterId ?? ""; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListRawStatementsRequest.$type, + ListRawStatementsRequest +); + +const baseListRawSessionStatesRequest: object = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListRawSessionStatesRequest = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesRequest" as const, + + encode( + message: ListRawSessionStatesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(18).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.pageSize !== 0) { + writer.uint32(32).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(42).string(message.pageToken); + 
} + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListRawSessionStatesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListRawSessionStatesRequest, + } as ListRawSessionStatesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 3: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 5: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListRawSessionStatesRequest { + const message = { + ...baseListRawSessionStatesRequest, + } as ListRawSessionStatesRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListRawSessionStatesRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListRawSessionStatesRequest { + const message = { + ...baseListRawSessionStatesRequest, + } as ListRawSessionStatesRequest; + message.clusterId = object.clusterId ?? ""; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListRawSessionStatesRequest.$type, + ListRawSessionStatesRequest +); + +const baseListRawSessionStatesResponse: object = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesResponse", + nextPageToken: "", +}; + +export const ListRawSessionStatesResponse = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawSessionStatesResponse" as const, + + encode( + message: ListRawSessionStatesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.sessionStates) { + SessionState.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListRawSessionStatesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListRawSessionStatesResponse, + } as ListRawSessionStatesResponse; + message.sessionStates = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionStates.push( + SessionState.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListRawSessionStatesResponse { + const message = { + ...baseListRawSessionStatesResponse, + } as ListRawSessionStatesResponse; + message.sessionStates = (object.sessionStates ?? []).map((e: any) => + SessionState.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListRawSessionStatesResponse): unknown { + const obj: any = {}; + if (message.sessionStates) { + obj.sessionStates = message.sessionStates.map((e) => + e ? SessionState.toJSON(e) : undefined + ); + } else { + obj.sessionStates = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListRawSessionStatesResponse { + const message = { + ...baseListRawSessionStatesResponse, + } as ListRawSessionStatesResponse; + message.sessionStates = + object.sessionStates?.map((e) => SessionState.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListRawSessionStatesResponse.$type, + ListRawSessionStatesResponse +); + +const baseListRawStatementsResponse: object = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsResponse", + nextPageToken: "", +}; + +export const ListRawStatementsResponse = { + $type: "yandex.cloud.mdb.postgresql.v1.ListRawStatementsResponse" as const, + + encode( + message: ListRawStatementsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.statements) { + QueryStatement.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListRawStatementsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListRawStatementsResponse, + } as ListRawStatementsResponse; + message.statements = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.statements.push( + QueryStatement.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListRawStatementsResponse { + const message = { + ...baseListRawStatementsResponse, + } as ListRawStatementsResponse; + message.statements = (object.statements ?? []).map((e: any) => + QueryStatement.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListRawStatementsResponse): unknown { + const obj: any = {}; + if (message.statements) { + obj.statements = message.statements.map((e) => + e ? 
QueryStatement.toJSON(e) : undefined + ); + } else { + obj.statements = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListRawStatementsResponse { + const message = { + ...baseListRawStatementsResponse, + } as ListRawStatementsResponse; + message.statements = + object.statements?.map((e) => QueryStatement.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListRawStatementsResponse.$type, + ListRawStatementsResponse +); + +/** A set of methods for PostgreSQL performance diagnostics. */ +export const PerformanceDiagnosticsServiceService = { + /** Retrieves raw statistics on sessions. Corresponds to the [pg_stat_activity view](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW). */ + listRawSessionStates: { + path: "/yandex.cloud.mdb.postgresql.v1.PerformanceDiagnosticsService/ListRawSessionStates", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListRawSessionStatesRequest) => + Buffer.from(ListRawSessionStatesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListRawSessionStatesRequest.decode(value), + responseSerialize: (value: ListRawSessionStatesResponse) => + Buffer.from(ListRawSessionStatesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListRawSessionStatesResponse.decode(value), + }, + /** Retrieves statistics on planning and execution of SQL statements (queries). 
*/ + listRawStatements: { + path: "/yandex.cloud.mdb.postgresql.v1.PerformanceDiagnosticsService/ListRawStatements", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListRawStatementsRequest) => + Buffer.from(ListRawStatementsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListRawStatementsRequest.decode(value), + responseSerialize: (value: ListRawStatementsResponse) => + Buffer.from(ListRawStatementsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListRawStatementsResponse.decode(value), + }, +} as const; + +export interface PerformanceDiagnosticsServiceServer + extends UntypedServiceImplementation { + /** Retrieves raw statistics on sessions. Corresponds to the [pg_stat_activity view](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW). */ + listRawSessionStates: handleUnaryCall< + ListRawSessionStatesRequest, + ListRawSessionStatesResponse + >; + /** Retrieves statistics on planning and execution of SQL statements (queries). */ + listRawStatements: handleUnaryCall< + ListRawStatementsRequest, + ListRawStatementsResponse + >; +} + +export interface PerformanceDiagnosticsServiceClient extends Client { + /** Retrieves raw statistics on sessions. Corresponds to the [pg_stat_activity view](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW). 
*/ + listRawSessionStates( + request: ListRawSessionStatesRequest, + callback: ( + error: ServiceError | null, + response: ListRawSessionStatesResponse + ) => void + ): ClientUnaryCall; + listRawSessionStates( + request: ListRawSessionStatesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListRawSessionStatesResponse + ) => void + ): ClientUnaryCall; + listRawSessionStates( + request: ListRawSessionStatesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListRawSessionStatesResponse + ) => void + ): ClientUnaryCall; + /** Retrieves statistics on planning and execution of SQL statements (queries). */ + listRawStatements( + request: ListRawStatementsRequest, + callback: ( + error: ServiceError | null, + response: ListRawStatementsResponse + ) => void + ): ClientUnaryCall; + listRawStatements( + request: ListRawStatementsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListRawStatementsResponse + ) => void + ): ClientUnaryCall; + listRawStatements( + request: ListRawStatementsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListRawStatementsResponse + ) => void + ): ClientUnaryCall; +} + +export const PerformanceDiagnosticsServiceClient = makeGenericClientConstructor( + PerformanceDiagnosticsServiceService, + "yandex.cloud.mdb.postgresql.v1.PerformanceDiagnosticsService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): PerformanceDiagnosticsServiceClient; + service: typeof PerformanceDiagnosticsServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if 
(typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/user.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/user.ts index 96dc10a2..333d9fe4 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/user.ts @@ -36,11 +36,17 @@ export interface User { */ login?: boolean; /** - * Roles and privileges that are granted to the user (`GRANT TO `). + * A set of roles and privileges that are granted to the user. * * For more information, see [the documentation](/docs/managed-postgresql/operations/grant). 
*/ grants: string[]; + /** + * Deletion Protection inhibits deletion of the user + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } export interface Permission { @@ -76,11 +82,17 @@ export interface UserSpec { */ login?: boolean; /** - * Roles and privileges that are granted to the user (`GRANT TO `). + * A set of roles and privileges that are granted to the user. * * For more information, see [the documentation](/docs/managed-postgresql/operations/grant). */ grants: string[]; + /** + * Deletion Protection inhibits deletion of the user + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } /** PostgreSQL user settings. */ @@ -135,6 +147,58 @@ export interface UserSettings { * See in-depth description in [PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-logging.html). */ logStatement: UserSettings_LogStatement; + /** + * Mode that the connection pooler is working in with specified user. + * + * See in-depth description in [Odyssey documentation](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string) + */ + poolMode: UserSettings_PoolingMode; + /** + * User can use prepared statements with transaction pooling. + * + * See in-depth description in [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-prepare.html) + */ + preparedStatementsPooling?: boolean; + /** + * The connection pooler setting. It determines the maximum allowed replication lag (in seconds). + * Pooler will reject connections to the replica with a lag above this threshold. + * It can be useful to prevent application from reading stale data. 
+ * + * Default value: 0 + * + * Value of `0` disables this mechanism + */ + catchupTimeout?: number; + /** + * The maximum time (in milliseconds) to wait for WAL replication (can be set only for PostgreSQL 12+) + * Terminate replication connections that are inactive for longer than this amount of time. + * + * Default value: `60000` (60 seconds). + * + * Value of `0` disables the timeout mechanism. + * + * See in-depth description in [PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-replication.html) + */ + walSenderTimeout?: number; + /** + * Sets the maximum allowed idle time (in milliseconds) between queries, when in a transaction. + * + * Values of `0` (default) disables the timeout. + * + * See in-depth description in [PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-client.html) + */ + idleInTransactionSessionTimeout?: number; + /** + * The maximum time (in milliseconds) to wait for statement + * The timeout is measured from the time a command arrives at the server until it is completed by the server. + * + * If `log_min_error_statement` is set to ERROR or lower, the statement that timed out will also be logged. 
+ * + * Value of `0` (default) disables the timeout + * + * See in-depth description in [PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-client.html) + */ + statementTimeout?: number; } export enum UserSettings_SynchronousCommit { @@ -334,6 +398,57 @@ export function userSettings_TransactionIsolationToJSON( } } +export enum UserSettings_PoolingMode { + POOLING_MODE_UNSPECIFIED = 0, + /** SESSION - (default) server connection will be assigned to it for the whole duration the client stays connected */ + SESSION = 1, + /** TRANSACTION - server connection is assigned to a client only during a transaction */ + TRANSACTION = 2, + /** STATEMENT - server connection will be put back into the pool immediately after a query completes */ + STATEMENT = 3, + UNRECOGNIZED = -1, +} + +export function userSettings_PoolingModeFromJSON( + object: any +): UserSettings_PoolingMode { + switch (object) { + case 0: + case "POOLING_MODE_UNSPECIFIED": + return UserSettings_PoolingMode.POOLING_MODE_UNSPECIFIED; + case 1: + case "SESSION": + return UserSettings_PoolingMode.SESSION; + case 2: + case "TRANSACTION": + return UserSettings_PoolingMode.TRANSACTION; + case 3: + case "STATEMENT": + return UserSettings_PoolingMode.STATEMENT; + case -1: + case "UNRECOGNIZED": + default: + return UserSettings_PoolingMode.UNRECOGNIZED; + } +} + +export function userSettings_PoolingModeToJSON( + object: UserSettings_PoolingMode +): string { + switch (object) { + case UserSettings_PoolingMode.POOLING_MODE_UNSPECIFIED: + return "POOLING_MODE_UNSPECIFIED"; + case UserSettings_PoolingMode.SESSION: + return "SESSION"; + case UserSettings_PoolingMode.TRANSACTION: + return "TRANSACTION"; + case UserSettings_PoolingMode.STATEMENT: + return "STATEMENT"; + default: + return "UNKNOWN"; + } +} + const baseUser: object = { $type: "yandex.cloud.mdb.postgresql.v1.User", name: "", @@ -370,6 +485,15 @@ export const User = { for (const v of message.grants) { writer.uint32(58).string(v!); } 
+ if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -403,6 +527,12 @@ export const User = { case 7: message.grants.push(reader.string()); break; + case 8: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -437,6 +567,11 @@ export const User = { ? Boolean(object.login) : undefined; message.grants = (object.grants ?? []).map((e: any) => String(e)); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : undefined; return message; }, @@ -463,6 +598,8 @@ export const User = { } else { obj.grants = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -479,6 +616,7 @@ export const User = { : undefined; message.login = object.login ?? undefined; message.grants = object.grants?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? undefined; return message; }, }; @@ -589,6 +727,15 @@ export const UserSpec = { for (const v of message.grants) { writer.uint32(58).string(v!); } + if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -622,6 +769,12 @@ export const UserSpec = { case 7: message.grants.push(reader.string()); break; + case 8: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -656,6 +809,11 @@ export const UserSpec = { ? Boolean(object.login) : undefined; message.grants = (object.grants ?? 
[]).map((e: any) => String(e)); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : undefined; return message; }, @@ -681,6 +839,8 @@ export const UserSpec = { } else { obj.grants = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -697,6 +857,7 @@ export const UserSpec = { : undefined; message.login = object.login ?? undefined; message.grants = object.grants?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? undefined; return message; }, }; @@ -708,6 +869,7 @@ const baseUserSettings: object = { defaultTransactionIsolation: 0, synchronousCommit: 0, logStatement: 0, + poolMode: 0, }; export const UserSettings = { @@ -747,6 +909,51 @@ export const UserSettings = { if (message.logStatement !== 0) { writer.uint32(48).int32(message.logStatement); } + if (message.poolMode !== 0) { + writer.uint32(56).int32(message.poolMode); + } + if (message.preparedStatementsPooling !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.preparedStatementsPooling!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.catchupTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.catchupTimeout! 
}, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.walSenderTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.walSenderTimeout!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -784,6 +991,39 @@ export const UserSettings = { case 6: message.logStatement = reader.int32() as any; break; + case 7: + message.poolMode = reader.int32() as any; + break; + case 8: + message.preparedStatementsPooling = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.catchupTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.walSenderTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -823,6 +1063,32 @@ export const UserSettings = { object.logStatement !== undefined && object.logStatement !== null ? userSettings_LogStatementFromJSON(object.logStatement) : 0; + message.poolMode = + object.poolMode !== undefined && object.poolMode !== null + ? userSettings_PoolingModeFromJSON(object.poolMode) + : 0; + message.preparedStatementsPooling = + object.preparedStatementsPooling !== undefined && + object.preparedStatementsPooling !== null + ? 
Boolean(object.preparedStatementsPooling) + : undefined; + message.catchupTimeout = + object.catchupTimeout !== undefined && object.catchupTimeout !== null + ? Number(object.catchupTimeout) + : undefined; + message.walSenderTimeout = + object.walSenderTimeout !== undefined && object.walSenderTimeout !== null + ? Number(object.walSenderTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; return message; }, @@ -847,6 +1113,19 @@ export const UserSettings = { (obj.logStatement = userSettings_LogStatementToJSON( message.logStatement )); + message.poolMode !== undefined && + (obj.poolMode = userSettings_PoolingModeToJSON(message.poolMode)); + message.preparedStatementsPooling !== undefined && + (obj.preparedStatementsPooling = message.preparedStatementsPooling); + message.catchupTimeout !== undefined && + (obj.catchupTimeout = message.catchupTimeout); + message.walSenderTimeout !== undefined && + (obj.walSenderTimeout = message.walSenderTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); return obj; }, @@ -862,6 +1141,14 @@ export const UserSettings = { message.synchronousCommit = object.synchronousCommit ?? 0; message.tempFileLimit = object.tempFileLimit ?? undefined; message.logStatement = object.logStatement ?? 0; + message.poolMode = object.poolMode ?? 0; + message.preparedStatementsPooling = + object.preparedStatementsPooling ?? undefined; + message.catchupTimeout = object.catchupTimeout ?? 
undefined; + message.walSenderTimeout = object.walSenderTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.statementTimeout = object.statementTimeout ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts index c6385ce0..6bd6f062 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts @@ -128,11 +128,17 @@ export interface UpdateUserRequest { */ login?: boolean; /** - * Roles and privileges that are granted to the user (`GRANT TO `). + * A set of roles and privileges that are granted to the user. * * For more information, see [the documentation](/docs/managed-postgresql/operations/grant). */ grants: string[]; + /** + * Deletion Protection inhibits deletion of the user + * + * Default value: `unspecified` (inherits cluster's deletion_protection) + */ + deletionProtection?: boolean; } export interface UpdateUserMetadata { @@ -658,6 +664,15 @@ export const UpdateUserRequest = { for (const v of message.grants) { writer.uint32(74).string(v!); } + if (message.deletionProtection !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.deletionProtection!, + }, + writer.uint32(82).fork() + ).ldelim(); + } return writer; }, @@ -697,6 +712,12 @@ export const UpdateUserRequest = { case 9: message.grants.push(reader.string()); break; + case 10: + message.deletionProtection = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -739,6 +760,11 @@ export const UpdateUserRequest = { ? Boolean(object.login) : undefined; message.grants = (object.grants ?? []).map((e: any) => String(e)); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : undefined; return message; }, @@ -770,6 +796,8 @@ export const UpdateUserRequest = { } else { obj.grants = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -793,6 +821,7 @@ export const UpdateUserRequest = { : undefined; message.login = object.login ?? undefined; message.grants = object.grants?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts index 02fdfe34..2cb71323 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts @@ -7,6 +7,7 @@ import { MaintenanceOperation, } from "../../../../../yandex/cloud/mdb/redis/v1/maintenance"; import { TimeOfDay } from "../../../../../google/type/timeofday"; +import { RedisConfigSet } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Redisconfigset50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfigset60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; @@ -68,6 +69,8 @@ export interface Cluster { deletionProtection: boolean; /** Persistence mode */ persistenceMode: Cluster_PersistenceMode; + /** Enable FQDN instead of ip */ + announceHostnames: boolean; } export enum Cluster_Environment { @@ -312,6 +315,8 @@ export interface ClusterConfig { backupWindowStart?: TimeOfDay; /** Access policy to DB */ access?: Access; + /** Unified configuration of a Redis cluster. 
*/ + redis?: RedisConfigSet; } export interface Shard { @@ -583,6 +588,7 @@ const baseCluster: object = { tlsEnabled: false, deletionProtection: false, persistenceMode: 0, + announceHostnames: false, }; export const Cluster = { @@ -665,6 +671,9 @@ export const Cluster = { if (message.persistenceMode !== 0) { writer.uint32(152).int32(message.persistenceMode); } + if (message.announceHostnames === true) { + writer.uint32(160).bool(message.announceHostnames); + } return writer; }, @@ -746,6 +755,9 @@ export const Cluster = { case 19: message.persistenceMode = reader.int32() as any; break; + case 20: + message.announceHostnames = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -832,6 +844,11 @@ export const Cluster = { object.persistenceMode !== undefined && object.persistenceMode !== null ? cluster_PersistenceModeFromJSON(object.persistenceMode) : 0; + message.announceHostnames = + object.announceHostnames !== undefined && + object.announceHostnames !== null + ? Boolean(object.announceHostnames) + : false; return message; }, @@ -889,6 +906,8 @@ export const Cluster = { (obj.persistenceMode = cluster_PersistenceModeToJSON( message.persistenceMode )); + message.announceHostnames !== undefined && + (obj.announceHostnames = message.announceHostnames); return obj; }, @@ -931,6 +950,7 @@ export const Cluster = { message.tlsEnabled = object.tlsEnabled ?? false; message.deletionProtection = object.deletionProtection ?? false; message.persistenceMode = object.persistenceMode ?? 0; + message.announceHostnames = object.announceHostnames ?? 
false; return message; }, }; @@ -1150,6 +1170,9 @@ export const ClusterConfig = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.redis !== undefined) { + RedisConfigSet.encode(message.redis, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -1196,6 +1219,9 @@ export const ClusterConfig = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 9: + message.redis = RedisConfigSet.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1239,6 +1265,10 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.redis = + object.redis !== undefined && object.redis !== null + ? RedisConfigSet.fromJSON(object.redis) + : undefined; return message; }, @@ -1271,6 +1301,10 @@ export const ClusterConfig = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.redis !== undefined && + (obj.redis = message.redis + ? RedisConfigSet.toJSON(message.redis) + : undefined); return obj; }, @@ -1308,6 +1342,10 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.redis = + object.redis !== undefined && object.redis !== null + ? 
RedisConfigSet.fromPartial(object.redis) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts index a229918c..49504049 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts @@ -32,6 +32,7 @@ import { import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/redis/v1/maintenance"; import { TimeOfDay } from "../../../../../google/type/timeofday"; +import { RedisConfig } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { Backup } from "../../../../../yandex/cloud/mdb/redis/v1/backup"; @@ -124,6 +125,8 @@ export interface CreateClusterRequest { deletionProtection: boolean; /** Persistence mode */ persistenceMode: Cluster_PersistenceMode; + /** Enable FQDN instead of ip */ + announceHostnames: boolean; } export interface CreateClusterRequest_LabelsEntry { @@ -169,6 +172,8 @@ export interface UpdateClusterRequest { deletionProtection: boolean; /** Persistence mode */ persistenceMode: Cluster_PersistenceMode; + /** Enable FQDN instead of ip */ + announceHostnames: boolean; } export interface UpdateClusterRequest_LabelsEntry { @@ -311,6 +316,8 @@ export interface RestoreClusterRequest { persistenceMode: Cluster_PersistenceMode; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Enable FQDN instead of ip */ + announceHostnames: boolean; } export interface RestoreClusterRequest_LabelsEntry { @@ -904,6 +911,8 @@ export interface ConfigSpec { backupWindowStart?: TimeOfDay; /** Access policy to DB */ access?: Access; + /** Unified configuration of a Redis cluster */ + redis?: RedisConfig; } const 
baseGetClusterRequest: object = { @@ -1166,6 +1175,7 @@ const baseCreateClusterRequest: object = { securityGroupIds: "", deletionProtection: false, persistenceMode: 0, + announceHostnames: false, }; export const CreateClusterRequest = { @@ -1224,6 +1234,9 @@ export const CreateClusterRequest = { if (message.persistenceMode !== 0) { writer.uint32(120).int32(message.persistenceMode); } + if (message.announceHostnames === true) { + writer.uint32(128).bool(message.announceHostnames); + } return writer; }, @@ -1285,6 +1298,9 @@ export const CreateClusterRequest = { case 15: message.persistenceMode = reader.int32() as any; break; + case 16: + message.announceHostnames = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1348,6 +1364,11 @@ export const CreateClusterRequest = { object.persistenceMode !== undefined && object.persistenceMode !== null ? cluster_PersistenceModeFromJSON(object.persistenceMode) : 0; + message.announceHostnames = + object.announceHostnames !== undefined && + object.announceHostnames !== null + ? Boolean(object.announceHostnames) + : false; return message; }, @@ -1390,6 +1411,8 @@ export const CreateClusterRequest = { (obj.persistenceMode = cluster_PersistenceModeToJSON( message.persistenceMode )); + message.announceHostnames !== undefined && + (obj.announceHostnames = message.announceHostnames); return obj; }, @@ -1421,6 +1444,7 @@ export const CreateClusterRequest = { message.tlsEnabled = object.tlsEnabled ?? undefined; message.deletionProtection = object.deletionProtection ?? false; message.persistenceMode = object.persistenceMode ?? 0; + message.announceHostnames = object.announceHostnames ?? 
false; return message; }, }; @@ -1585,6 +1609,7 @@ const baseUpdateClusterRequest: object = { securityGroupIds: "", deletionProtection: false, persistenceMode: 0, + announceHostnames: false, }; export const UpdateClusterRequest = { @@ -1634,6 +1659,9 @@ export const UpdateClusterRequest = { if (message.persistenceMode !== 0) { writer.uint32(80).int32(message.persistenceMode); } + if (message.announceHostnames === true) { + writer.uint32(96).bool(message.announceHostnames); + } return writer; }, @@ -1688,6 +1716,9 @@ export const UpdateClusterRequest = { case 10: message.persistenceMode = reader.int32() as any; break; + case 12: + message.announceHostnames = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1741,6 +1772,11 @@ export const UpdateClusterRequest = { object.persistenceMode !== undefined && object.persistenceMode !== null ? cluster_PersistenceModeFromJSON(object.persistenceMode) : 0; + message.announceHostnames = + object.announceHostnames !== undefined && + object.announceHostnames !== null + ? Boolean(object.announceHostnames) + : false; return message; }, @@ -1779,6 +1815,8 @@ export const UpdateClusterRequest = { (obj.persistenceMode = cluster_PersistenceModeToJSON( message.persistenceMode )); + message.announceHostnames !== undefined && + (obj.announceHostnames = message.announceHostnames); return obj; }, @@ -1813,6 +1851,7 @@ export const UpdateClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.persistenceMode = object.persistenceMode ?? 0; + message.announceHostnames = object.announceHostnames ?? 
false; return message; }, }; @@ -2843,6 +2882,7 @@ const baseRestoreClusterRequest: object = { securityGroupIds: "", persistenceMode: 0, deletionProtection: false, + announceHostnames: false, }; export const RestoreClusterRequest = { @@ -2901,6 +2941,9 @@ export const RestoreClusterRequest = { if (message.deletionProtection === true) { writer.uint32(104).bool(message.deletionProtection); } + if (message.announceHostnames === true) { + writer.uint32(112).bool(message.announceHostnames); + } return writer; }, @@ -2962,6 +3005,9 @@ export const RestoreClusterRequest = { case 13: message.deletionProtection = reader.bool(); break; + case 14: + message.announceHostnames = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -3025,6 +3071,11 @@ export const RestoreClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.announceHostnames = + object.announceHostnames !== undefined && + object.announceHostnames !== null + ? Boolean(object.announceHostnames) + : false; return message; }, @@ -3067,6 +3118,8 @@ export const RestoreClusterRequest = { )); message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.announceHostnames !== undefined && + (obj.announceHostnames = message.announceHostnames); return obj; }, @@ -3098,6 +3151,7 @@ export const RestoreClusterRequest = { message.tlsEnabled = object.tlsEnabled ?? undefined; message.persistenceMode = object.persistenceMode ?? 0; message.deletionProtection = object.deletionProtection ?? false; + message.announceHostnames = object.announceHostnames ?? 
false; return message; }, }; @@ -6302,6 +6356,9 @@ export const ConfigSpec = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.redis !== undefined) { + RedisConfig.encode(message.redis, writer.uint32(90).fork()).ldelim(); + } return writer; }, @@ -6336,6 +6393,9 @@ export const ConfigSpec = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 11: + message.redis = RedisConfig.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -6379,6 +6439,10 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.redis = + object.redis !== undefined && object.redis !== null + ? RedisConfig.fromJSON(object.redis) + : undefined; return message; }, @@ -6411,6 +6475,10 @@ export const ConfigSpec = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.redis !== undefined && + (obj.redis = message.redis + ? RedisConfig.toJSON(message.redis) + : undefined); return obj; }, @@ -6448,6 +6516,10 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.redis = + object.redis !== undefined && object.redis !== null + ? 
RedisConfig.fromPartial(object.redis) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis.ts new file mode 100644 index 00000000..9f7bf40f --- /dev/null +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis.ts @@ -0,0 +1,658 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Int64Value } from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.redis.v1.config"; + +/** + * Fields and structure of `RedisConfig` reflects Redis configuration file + * parameters. + */ +export interface RedisConfig { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig"; + /** + * Redis key eviction policy for a dataset that reaches maximum memory, + * available to the host. Redis maxmemory setting depends on Managed + * Service for Redis [host class](/docs/managed-redis/concepts/instance-types). + * + * All policies are described in detail in [Redis documentation](https://redis.io/topics/lru-cache). + */ + maxmemoryPolicy: RedisConfig_MaxmemoryPolicy; + /** + * Time that Redis keeps the connection open while the client is idle. + * If no new command is sent during that time, the connection is closed. + */ + timeout?: number; + /** Authentication password. */ + password: string; + /** Number of database buckets on a single redis-server process. */ + databases?: number; + /** Threshold for logging slow requests to server in microseconds (log only slower than it). */ + slowlogLogSlowerThan?: number; + /** Max slow requests number to log. */ + slowlogMaxLen?: number; + /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ + notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. 
*/ + clientOutputBufferLimitPubsub?: RedisConfig_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: RedisConfig_ClientOutputBufferLimit; + /** Redis maxmemory percent */ + maxmemoryPercent?: number; +} + +export enum RedisConfig_MaxmemoryPolicy { + MAXMEMORY_POLICY_UNSPECIFIED = 0, + /** VOLATILE_LRU - Try to remove less recently used (LRU) keys with `expire set`. */ + VOLATILE_LRU = 1, + /** ALLKEYS_LRU - Remove less recently used (LRU) keys. */ + ALLKEYS_LRU = 2, + /** VOLATILE_LFU - Try to remove least frequently used (LFU) keys with `expire set`. */ + VOLATILE_LFU = 3, + /** ALLKEYS_LFU - Remove least frequently used (LFU) keys. */ + ALLKEYS_LFU = 4, + /** VOLATILE_RANDOM - Try to remove keys with `expire set` randomly. */ + VOLATILE_RANDOM = 5, + /** ALLKEYS_RANDOM - Remove keys randomly. */ + ALLKEYS_RANDOM = 6, + /** + * VOLATILE_TTL - Try to remove less recently used (LRU) keys with `expire set` + * and shorter TTL first. + */ + VOLATILE_TTL = 7, + /** + * NOEVICTION - Return errors when memory limit was reached and commands could require + * more memory to be used. 
+ */ + NOEVICTION = 8, + UNRECOGNIZED = -1, +} + +export function redisConfig_MaxmemoryPolicyFromJSON( + object: any +): RedisConfig_MaxmemoryPolicy { + switch (object) { + case 0: + case "MAXMEMORY_POLICY_UNSPECIFIED": + return RedisConfig_MaxmemoryPolicy.MAXMEMORY_POLICY_UNSPECIFIED; + case 1: + case "VOLATILE_LRU": + return RedisConfig_MaxmemoryPolicy.VOLATILE_LRU; + case 2: + case "ALLKEYS_LRU": + return RedisConfig_MaxmemoryPolicy.ALLKEYS_LRU; + case 3: + case "VOLATILE_LFU": + return RedisConfig_MaxmemoryPolicy.VOLATILE_LFU; + case 4: + case "ALLKEYS_LFU": + return RedisConfig_MaxmemoryPolicy.ALLKEYS_LFU; + case 5: + case "VOLATILE_RANDOM": + return RedisConfig_MaxmemoryPolicy.VOLATILE_RANDOM; + case 6: + case "ALLKEYS_RANDOM": + return RedisConfig_MaxmemoryPolicy.ALLKEYS_RANDOM; + case 7: + case "VOLATILE_TTL": + return RedisConfig_MaxmemoryPolicy.VOLATILE_TTL; + case 8: + case "NOEVICTION": + return RedisConfig_MaxmemoryPolicy.NOEVICTION; + case -1: + case "UNRECOGNIZED": + default: + return RedisConfig_MaxmemoryPolicy.UNRECOGNIZED; + } +} + +export function redisConfig_MaxmemoryPolicyToJSON( + object: RedisConfig_MaxmemoryPolicy +): string { + switch (object) { + case RedisConfig_MaxmemoryPolicy.MAXMEMORY_POLICY_UNSPECIFIED: + return "MAXMEMORY_POLICY_UNSPECIFIED"; + case RedisConfig_MaxmemoryPolicy.VOLATILE_LRU: + return "VOLATILE_LRU"; + case RedisConfig_MaxmemoryPolicy.ALLKEYS_LRU: + return "ALLKEYS_LRU"; + case RedisConfig_MaxmemoryPolicy.VOLATILE_LFU: + return "VOLATILE_LFU"; + case RedisConfig_MaxmemoryPolicy.ALLKEYS_LFU: + return "ALLKEYS_LFU"; + case RedisConfig_MaxmemoryPolicy.VOLATILE_RANDOM: + return "VOLATILE_RANDOM"; + case RedisConfig_MaxmemoryPolicy.ALLKEYS_RANDOM: + return "ALLKEYS_RANDOM"; + case RedisConfig_MaxmemoryPolicy.VOLATILE_TTL: + return "VOLATILE_TTL"; + case RedisConfig_MaxmemoryPolicy.NOEVICTION: + return "NOEVICTION"; + default: + return "UNKNOWN"; + } +} + +export interface RedisConfig_ClientOutputBufferLimit { + $type: 
"yandex.cloud.mdb.redis.v1.config.RedisConfig.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. */ + softSeconds?: number; +} + +export interface RedisConfigSet { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet"; + /** + * Effective settings for a Redis cluster (a combination of settings + * defined in [user_config] and [default_config]). + */ + effectiveConfig?: RedisConfig; + /** User-defined settings for a Redis cluster. */ + userConfig?: RedisConfig; + /** Default configuration for a Redis cluster. */ + defaultConfig?: RedisConfig; +} + +const baseRedisConfig: object = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig", + maxmemoryPolicy: 0, + password: "", + notifyKeyspaceEvents: "", +}; + +export const RedisConfig = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig" as const, + + encode( + message: RedisConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxmemoryPolicy !== 0) { + writer.uint32(8).int32(message.maxmemoryPolicy); + } + if (message.timeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.timeout! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.password !== "") { + writer.uint32(26).string(message.password); + } + if (message.databases !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.databases! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.slowlogLogSlowerThan !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowlogLogSlowerThan!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.slowlogMaxLen !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.slowlogMaxLen! 
}, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.notifyKeyspaceEvents !== "") { + writer.uint32(58).string(message.notifyKeyspaceEvents); + } + if (message.clientOutputBufferLimitPubsub !== undefined) { + RedisConfig_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + RedisConfig_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.maxmemoryPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxmemoryPercent!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RedisConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRedisConfig } as RedisConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxmemoryPolicy = reader.int32() as any; + break; + case 2: + message.timeout = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.password = reader.string(); + break; + case 4: + message.databases = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.slowlogLogSlowerThan = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.slowlogMaxLen = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.notifyKeyspaceEvents = reader.string(); + break; + case 8: + message.clientOutputBufferLimitPubsub = + RedisConfig_ClientOutputBufferLimit.decode(reader, reader.uint32()); + break; + case 9: + message.clientOutputBufferLimitNormal = + RedisConfig_ClientOutputBufferLimit.decode(reader, reader.uint32()); + break; + case 10: + 
message.maxmemoryPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RedisConfig { + const message = { ...baseRedisConfig } as RedisConfig; + message.maxmemoryPolicy = + object.maxmemoryPolicy !== undefined && object.maxmemoryPolicy !== null + ? redisConfig_MaxmemoryPolicyFromJSON(object.maxmemoryPolicy) + : 0; + message.timeout = + object.timeout !== undefined && object.timeout !== null + ? Number(object.timeout) + : undefined; + message.password = + object.password !== undefined && object.password !== null + ? String(object.password) + : ""; + message.databases = + object.databases !== undefined && object.databases !== null + ? Number(object.databases) + : undefined; + message.slowlogLogSlowerThan = + object.slowlogLogSlowerThan !== undefined && + object.slowlogLogSlowerThan !== null + ? Number(object.slowlogLogSlowerThan) + : undefined; + message.slowlogMaxLen = + object.slowlogMaxLen !== undefined && object.slowlogMaxLen !== null + ? Number(object.slowlogMaxLen) + : undefined; + message.notifyKeyspaceEvents = + object.notifyKeyspaceEvents !== undefined && + object.notifyKeyspaceEvents !== null + ? String(object.notifyKeyspaceEvents) + : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? RedisConfig_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? RedisConfig_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; + message.maxmemoryPercent = + object.maxmemoryPercent !== undefined && object.maxmemoryPercent !== null + ? 
Number(object.maxmemoryPercent) + : undefined; + return message; + }, + + toJSON(message: RedisConfig): unknown { + const obj: any = {}; + message.maxmemoryPolicy !== undefined && + (obj.maxmemoryPolicy = redisConfig_MaxmemoryPolicyToJSON( + message.maxmemoryPolicy + )); + message.timeout !== undefined && (obj.timeout = message.timeout); + message.password !== undefined && (obj.password = message.password); + message.databases !== undefined && (obj.databases = message.databases); + message.slowlogLogSlowerThan !== undefined && + (obj.slowlogLogSlowerThan = message.slowlogLogSlowerThan); + message.slowlogMaxLen !== undefined && + (obj.slowlogMaxLen = message.slowlogMaxLen); + message.notifyKeyspaceEvents !== undefined && + (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? RedisConfig_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? RedisConfig_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); + message.maxmemoryPercent !== undefined && + (obj.maxmemoryPercent = message.maxmemoryPercent); + return obj; + }, + + fromPartial, I>>( + object: I + ): RedisConfig { + const message = { ...baseRedisConfig } as RedisConfig; + message.maxmemoryPolicy = object.maxmemoryPolicy ?? 0; + message.timeout = object.timeout ?? undefined; + message.password = object.password ?? ""; + message.databases = object.databases ?? undefined; + message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; + message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; + message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? 
""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? RedisConfig_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? RedisConfig_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; + message.maxmemoryPercent = object.maxmemoryPercent ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(RedisConfig.$type, RedisConfig); + +const baseRedisConfig_ClientOutputBufferLimit: object = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig.ClientOutputBufferLimit", +}; + +export const RedisConfig_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig.ClientOutputBufferLimit" as const, + + encode( + message: RedisConfig_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RedisConfig_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRedisConfig_ClientOutputBufferLimit, + } as RedisConfig_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RedisConfig_ClientOutputBufferLimit { + const message = { + ...baseRedisConfig_ClientOutputBufferLimit, + } as RedisConfig_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: RedisConfig_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RedisConfig_ClientOutputBufferLimit { + const message = { + ...baseRedisConfig_ClientOutputBufferLimit, + } as RedisConfig_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + RedisConfig_ClientOutputBufferLimit.$type, + RedisConfig_ClientOutputBufferLimit +); + +const baseRedisConfigSet: object = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet", +}; + +export const RedisConfigSet = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet" as const, + + encode( + message: RedisConfigSet, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + RedisConfig.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + RedisConfig.encode(message.userConfig, writer.uint32(18).fork()).ldelim(); + } + if (message.defaultConfig !== undefined) { + RedisConfig.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RedisConfigSet { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRedisConfigSet } as RedisConfigSet; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = RedisConfig.decode(reader, reader.uint32()); + break; + case 2: + message.userConfig = RedisConfig.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = RedisConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RedisConfigSet { + const message = { ...baseRedisConfigSet } as RedisConfigSet; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? RedisConfig.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
RedisConfig.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? RedisConfig.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: RedisConfigSet): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? RedisConfig.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? RedisConfig.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? RedisConfig.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RedisConfigSet { + const message = { ...baseRedisConfigSet } as RedisConfigSet; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? RedisConfig.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? RedisConfig.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? RedisConfig.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(RedisConfigSet.$type, RedisConfigSet); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts index 3cbf65e9..8fe6917a 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts @@ -39,6 +39,8 @@ export interface Redisconfig62 { clientOutputBufferLimitPubsub?: Redisconfig62_ClientOutputBufferLimit; /** Redis connection output buffers limits for clients. */ clientOutputBufferLimitNormal?: Redisconfig62_ClientOutputBufferLimit; + /** Redis maxmemory percent */ + maxmemoryPercent?: number; } export enum Redisconfig62_MaxmemoryPolicy { @@ -218,6 +220,15 @@ export const Redisconfig62 = { writer.uint32(74).fork() ).ldelim(); } + if (message.maxmemoryPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxmemoryPercent!, + }, + writer.uint32(82).fork() + ).ldelim(); + } return writer; }, @@ -269,6 +280,12 @@ export const Redisconfig62 = { reader.uint32() ); break; + case 10: + message.maxmemoryPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -323,6 +340,10 @@ export const Redisconfig62 = { object.clientOutputBufferLimitNormal ) : undefined; + message.maxmemoryPercent = + object.maxmemoryPercent !== undefined && object.maxmemoryPercent !== null + ? 
Number(object.maxmemoryPercent) + : undefined; return message; }, @@ -353,6 +374,8 @@ export const Redisconfig62 = { message.clientOutputBufferLimitNormal ) : undefined); + message.maxmemoryPercent !== undefined && + (obj.maxmemoryPercent = message.maxmemoryPercent); return obj; }, @@ -381,6 +404,7 @@ export const Redisconfig62 = { object.clientOutputBufferLimitNormal ) : undefined; + message.maxmemoryPercent = object.maxmemoryPercent ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts index 1f4e8796..da4479f1 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts @@ -39,6 +39,8 @@ export interface Redisconfig70 { clientOutputBufferLimitPubsub?: Redisconfig70_ClientOutputBufferLimit; /** Redis connection output buffers limits for clients. */ clientOutputBufferLimitNormal?: Redisconfig70_ClientOutputBufferLimit; + /** Redis maxmemory percent */ + maxmemoryPercent?: number; } export enum Redisconfig70_MaxmemoryPolicy { @@ -218,6 +220,15 @@ export const Redisconfig70 = { writer.uint32(74).fork() ).ldelim(); } + if (message.maxmemoryPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxmemoryPercent!, + }, + writer.uint32(82).fork() + ).ldelim(); + } return writer; }, @@ -269,6 +280,12 @@ export const Redisconfig70 = { reader.uint32() ); break; + case 10: + message.maxmemoryPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -323,6 +340,10 @@ export const Redisconfig70 = { object.clientOutputBufferLimitNormal ) : undefined; + message.maxmemoryPercent = + object.maxmemoryPercent !== undefined && object.maxmemoryPercent !== null + ? 
Number(object.maxmemoryPercent) + : undefined; return message; }, @@ -353,6 +374,8 @@ export const Redisconfig70 = { message.clientOutputBufferLimitNormal ) : undefined); + message.maxmemoryPercent !== undefined && + (obj.maxmemoryPercent = message.maxmemoryPercent); return obj; }, @@ -381,6 +404,7 @@ export const Redisconfig70 = { object.clientOutputBufferLimitNormal ) : undefined; + message.maxmemoryPercent = object.maxmemoryPercent ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/oauth/claims.ts b/src/generated/yandex/cloud/oauth/claims.ts index dea05605..033831ba 100644 --- a/src/generated/yandex/cloud/oauth/claims.ts +++ b/src/generated/yandex/cloud/oauth/claims.ts @@ -5,6 +5,56 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.oauth"; +export enum SubjectType { + SUBJECT_TYPE_UNSPECIFIED = 0, + USER_ACCOUNT = 1, + SERVICE_ACCOUNT = 2, + GROUP = 3, + INVITEE = 4, + UNRECOGNIZED = -1, +} + +export function subjectTypeFromJSON(object: any): SubjectType { + switch (object) { + case 0: + case "SUBJECT_TYPE_UNSPECIFIED": + return SubjectType.SUBJECT_TYPE_UNSPECIFIED; + case 1: + case "USER_ACCOUNT": + return SubjectType.USER_ACCOUNT; + case 2: + case "SERVICE_ACCOUNT": + return SubjectType.SERVICE_ACCOUNT; + case 3: + case "GROUP": + return SubjectType.GROUP; + case 4: + case "INVITEE": + return SubjectType.INVITEE; + case -1: + case "UNRECOGNIZED": + default: + return SubjectType.UNRECOGNIZED; + } +} + +export function subjectTypeToJSON(object: SubjectType): string { + switch (object) { + case SubjectType.SUBJECT_TYPE_UNSPECIFIED: + return "SUBJECT_TYPE_UNSPECIFIED"; + case SubjectType.USER_ACCOUNT: + return "USER_ACCOUNT"; + case SubjectType.SERVICE_ACCOUNT: + return "SERVICE_ACCOUNT"; + case SubjectType.GROUP: + return "GROUP"; + case SubjectType.INVITEE: + return "INVITEE"; + default: + return "UNKNOWN"; + } +} + /** Claims representation, see 
https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims for details. */ export interface SubjectClaims { $type: "yandex.cloud.oauth.SubjectClaims"; @@ -43,6 +93,8 @@ export interface SubjectClaims { * If the phone number contains an extension, it is RECOMMENDED that the extension be represented using the RFC 3966 [RFC3966] extension syntax, for example, +1 (604) 555-1234;ext=5678. */ phoneNumber: string; + /** Subject type. */ + subType: SubjectType; /** User federation, non-empty only for federated users. */ federation?: Federation; } @@ -68,6 +120,7 @@ const baseSubjectClaims: object = { zoneinfo: "", locale: "", phoneNumber: "", + subType: 0, }; export const SubjectClaims = { @@ -107,6 +160,9 @@ export const SubjectClaims = { if (message.phoneNumber !== "") { writer.uint32(138).string(message.phoneNumber); } + if (message.subType !== 0) { + writer.uint32(792).int32(message.subType); + } if (message.federation !== undefined) { Federation.encode(message.federation, writer.uint32(802).fork()).ldelim(); } @@ -150,6 +206,9 @@ export const SubjectClaims = { case 17: message.phoneNumber = reader.string(); break; + case 99: + message.subType = reader.int32() as any; + break; case 100: message.federation = Federation.decode(reader, reader.uint32()); break; @@ -202,6 +261,10 @@ export const SubjectClaims = { object.phoneNumber !== undefined && object.phoneNumber !== null ? String(object.phoneNumber) : ""; + message.subType = + object.subType !== undefined && object.subType !== null + ? subjectTypeFromJSON(object.subType) + : 0; message.federation = object.federation !== undefined && object.federation !== null ? 
Federation.fromJSON(object.federation) @@ -223,6 +286,8 @@ export const SubjectClaims = { message.locale !== undefined && (obj.locale = message.locale); message.phoneNumber !== undefined && (obj.phoneNumber = message.phoneNumber); + message.subType !== undefined && + (obj.subType = subjectTypeToJSON(message.subType)); message.federation !== undefined && (obj.federation = message.federation ? Federation.toJSON(message.federation) @@ -244,6 +309,7 @@ export const SubjectClaims = { message.zoneinfo = object.zoneinfo ?? ""; message.locale = object.locale ?? ""; message.phoneNumber = object.phoneNumber ?? ""; + message.subType = object.subType ?? 0; message.federation = object.federation !== undefined && object.federation !== null ? Federation.fromPartial(object.federation) diff --git a/src/generated/yandex/cloud/organizationmanager/index.ts b/src/generated/yandex/cloud/organizationmanager/index.ts index 89935ab8..b451482a 100644 --- a/src/generated/yandex/cloud/organizationmanager/index.ts +++ b/src/generated/yandex/cloud/organizationmanager/index.ts @@ -1,7 +1,10 @@ export * as group from './v1/group' +export * as group_mapping from './v1/group_mapping' +export * as group_mapping_service from './v1/group_mapping_service' export * as group_service from './v1/group_service' export * as organization from './v1/organization' export * as organization_service from './v1/organization_service' +export * as ssh_certificate_service from './v1/ssh_certificate_service' export * as user_account from './v1/user_account' export * as user_service from './v1/user_service' export * as certificate from './v1/saml/certificate' diff --git a/src/generated/yandex/cloud/organizationmanager/v1/group.ts b/src/generated/yandex/cloud/organizationmanager/v1/group.ts index 1f2cc1d7..8d88ad89 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/group.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/group.ts @@ -8,7 +8,7 @@ export const protobufPackage = 
"yandex.cloud.organizationmanager.v1"; /** * A Group resource. - * For more information, see [Groups](/docs/organization/groups). + * For more information, see [Groups](/docs/organization/manage-groups). */ export interface Group { $type: "yandex.cloud.organizationmanager.v1.Group"; diff --git a/src/generated/yandex/cloud/organizationmanager/v1/group_mapping.ts b/src/generated/yandex/cloud/organizationmanager/v1/group_mapping.ts new file mode 100644 index 00000000..b9ed8c12 --- /dev/null +++ b/src/generated/yandex/cloud/organizationmanager/v1/group_mapping.ts @@ -0,0 +1,212 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.organizationmanager.v1"; + +/** Group mapping represents which external (federated) groups should match which internal (cloud) groups */ +export interface GroupMappingItem { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItem"; + /** External group id (received from identity provider) */ + externalGroupId: string; + /** Internal cloud group id */ + internalGroupId: string; +} + +/** + * Group synchronization status for a specific federation + * Absence of this object for a federation means that there is no group synchronization set of for the federation. + */ +export interface GroupMapping { + $type: "yandex.cloud.organizationmanager.v1.GroupMapping"; + /** Federation id */ + federationId: string; + /** Flag to show whether group synchronization should be enabled for this federation. 
*/ + enabled: boolean; +} + +const baseGroupMappingItem: object = { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItem", + externalGroupId: "", + internalGroupId: "", +}; + +export const GroupMappingItem = { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItem" as const, + + encode( + message: GroupMappingItem, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.externalGroupId !== "") { + writer.uint32(10).string(message.externalGroupId); + } + if (message.internalGroupId !== "") { + writer.uint32(18).string(message.internalGroupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GroupMappingItem { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGroupMappingItem } as GroupMappingItem; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.externalGroupId = reader.string(); + break; + case 2: + message.internalGroupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GroupMappingItem { + const message = { ...baseGroupMappingItem } as GroupMappingItem; + message.externalGroupId = + object.externalGroupId !== undefined && object.externalGroupId !== null + ? String(object.externalGroupId) + : ""; + message.internalGroupId = + object.internalGroupId !== undefined && object.internalGroupId !== null + ? 
String(object.internalGroupId) + : ""; + return message; + }, + + toJSON(message: GroupMappingItem): unknown { + const obj: any = {}; + message.externalGroupId !== undefined && + (obj.externalGroupId = message.externalGroupId); + message.internalGroupId !== undefined && + (obj.internalGroupId = message.internalGroupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GroupMappingItem { + const message = { ...baseGroupMappingItem } as GroupMappingItem; + message.externalGroupId = object.externalGroupId ?? ""; + message.internalGroupId = object.internalGroupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GroupMappingItem.$type, GroupMappingItem); + +const baseGroupMapping: object = { + $type: "yandex.cloud.organizationmanager.v1.GroupMapping", + federationId: "", + enabled: false, +}; + +export const GroupMapping = { + $type: "yandex.cloud.organizationmanager.v1.GroupMapping" as const, + + encode( + message: GroupMapping, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + if (message.enabled === true) { + writer.uint32(16).bool(message.enabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GroupMapping { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGroupMapping } as GroupMapping; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + case 2: + message.enabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GroupMapping { + const message = { ...baseGroupMapping } as GroupMapping; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? 
String(object.federationId) + : ""; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + return message; + }, + + toJSON(message: GroupMapping): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + message.enabled !== undefined && (obj.enabled = message.enabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): GroupMapping { + const message = { ...baseGroupMapping } as GroupMapping; + message.federationId = object.federationId ?? ""; + message.enabled = object.enabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(GroupMapping.$type, GroupMapping); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/organizationmanager/v1/group_mapping_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/group_mapping_service.ts new file mode 100644 index 00000000..3a4b905f --- /dev/null +++ b/src/generated/yandex/cloud/organizationmanager/v1/group_mapping_service.ts @@ -0,0 +1,1754 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + GroupMapping, + GroupMappingItem, +} from "../../../../yandex/cloud/organizationmanager/v1/group_mapping"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.organizationmanager.v1"; + +export interface GetGroupMappingRequest { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingRequest"; + federationId: string; +} + +export interface GetGroupMappingResponse { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingResponse"; + groupMapping?: GroupMapping; +} + +export interface CreateGroupMappingRequest { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMappingRequest"; + /** Federation the group mapping will be created for */ + federationId: string; + /** Synchronization status. 
*/ + enabled: boolean; +} + +export interface CreateGroupMappingMetadata { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMappingMetadata"; + federationId: string; +} + +/** Request for updating group mapping configuration */ +export interface UpdateGroupMappingRequest { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingRequest"; + /** Federation the group mapping update is requested */ + federationId: string; + /** A set of fields that should be updated */ + updateMask?: FieldMask; + /** A new state of synchronization to update (if mentioned in update_mask). */ + enabled: boolean; +} + +export interface UpdateGroupMappingMetadata { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingMetadata"; + federationId: string; +} + +export interface DeleteGroupMappingRequest { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMappingRequest"; + /** Federation the group mapping deletion is requested */ + federationId: string; +} + +export interface DeleteGroupMappingMetadata { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMappingMetadata"; + federationId: string; +} + +/** Request for updating group mapping configuration */ +export interface UpdateGroupMappingItemsRequest { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsRequest"; + /** Federation the group mapping update is requested */ + federationId: string; + /** A collection of mapping items to add or remove (ignores update_fields). */ + groupMappingItemDeltas: GroupMappingItemDelta[]; +} + +/** Message describes the user's request to change (add or remove) a single group mapping. 
*/ +export interface GroupMappingItemDelta { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItemDelta"; + item?: GroupMappingItem; + action: GroupMappingItemDelta_Action; +} + +export enum GroupMappingItemDelta_Action { + ACTION_UNSPECIFIED = 0, + /** ADD - Group mapping item is to be added */ + ADD = 1, + /** REMOVE - Group mapping item is to be removed */ + REMOVE = 2, + UNRECOGNIZED = -1, +} + +export function groupMappingItemDelta_ActionFromJSON( + object: any +): GroupMappingItemDelta_Action { + switch (object) { + case 0: + case "ACTION_UNSPECIFIED": + return GroupMappingItemDelta_Action.ACTION_UNSPECIFIED; + case 1: + case "ADD": + return GroupMappingItemDelta_Action.ADD; + case 2: + case "REMOVE": + return GroupMappingItemDelta_Action.REMOVE; + case -1: + case "UNRECOGNIZED": + default: + return GroupMappingItemDelta_Action.UNRECOGNIZED; + } +} + +export function groupMappingItemDelta_ActionToJSON( + object: GroupMappingItemDelta_Action +): string { + switch (object) { + case GroupMappingItemDelta_Action.ACTION_UNSPECIFIED: + return "ACTION_UNSPECIFIED"; + case GroupMappingItemDelta_Action.ADD: + return "ADD"; + case GroupMappingItemDelta_Action.REMOVE: + return "REMOVE"; + default: + return "UNKNOWN"; + } +} + +export interface UpdateGroupMappingItemsMetadata { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsMetadata"; + federationId: string; +} + +export interface UpdateGroupMappingItemsResponse { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsResponse"; + /** Effective changes that were applied */ + groupMappingItemDeltas: GroupMappingItemDelta[]; +} + +export interface ListGroupMappingItemsRequest { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsRequest"; + federationId: string; + pageSize: number; + pageToken: string; + filter: string; +} + +export interface ListGroupMappingItemsResponse { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsResponse"; + 
groupMappingItems: GroupMappingItem[]; + nextPageToken: string; +} + +const baseGetGroupMappingRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingRequest", + federationId: "", +}; + +export const GetGroupMappingRequest = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingRequest" as const, + + encode( + message: GetGroupMappingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetGroupMappingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetGroupMappingRequest } as GetGroupMappingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetGroupMappingRequest { + const message = { ...baseGetGroupMappingRequest } as GetGroupMappingRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: GetGroupMappingRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetGroupMappingRequest { + const message = { ...baseGetGroupMappingRequest } as GetGroupMappingRequest; + message.federationId = object.federationId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetGroupMappingRequest.$type, GetGroupMappingRequest); + +const baseGetGroupMappingResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingResponse", +}; + +export const GetGroupMappingResponse = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupMappingResponse" as const, + + encode( + message: GetGroupMappingResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupMapping !== undefined) { + GroupMapping.encode( + message.groupMapping, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetGroupMappingResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseGetGroupMappingResponse, + } as GetGroupMappingResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupMapping = GroupMapping.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetGroupMappingResponse { + const message = { + ...baseGetGroupMappingResponse, + } as GetGroupMappingResponse; + message.groupMapping = + object.groupMapping !== undefined && object.groupMapping !== null + ? GroupMapping.fromJSON(object.groupMapping) + : undefined; + return message; + }, + + toJSON(message: GetGroupMappingResponse): unknown { + const obj: any = {}; + message.groupMapping !== undefined && + (obj.groupMapping = message.groupMapping + ? 
GroupMapping.toJSON(message.groupMapping) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetGroupMappingResponse { + const message = { + ...baseGetGroupMappingResponse, + } as GetGroupMappingResponse; + message.groupMapping = + object.groupMapping !== undefined && object.groupMapping !== null + ? GroupMapping.fromPartial(object.groupMapping) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GetGroupMappingResponse.$type, GetGroupMappingResponse); + +const baseCreateGroupMappingRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMappingRequest", + federationId: "", + enabled: false, +}; + +export const CreateGroupMappingRequest = { + $type: + "yandex.cloud.organizationmanager.v1.CreateGroupMappingRequest" as const, + + encode( + message: CreateGroupMappingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + if (message.enabled === true) { + writer.uint32(24).bool(message.enabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGroupMappingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateGroupMappingRequest, + } as CreateGroupMappingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + case 3: + message.enabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGroupMappingRequest { + const message = { + ...baseCreateGroupMappingRequest, + } as CreateGroupMappingRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? 
String(object.federationId) + : ""; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + return message; + }, + + toJSON(message: CreateGroupMappingRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + message.enabled !== undefined && (obj.enabled = message.enabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGroupMappingRequest { + const message = { + ...baseCreateGroupMappingRequest, + } as CreateGroupMappingRequest; + message.federationId = object.federationId ?? ""; + message.enabled = object.enabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + CreateGroupMappingRequest.$type, + CreateGroupMappingRequest +); + +const baseCreateGroupMappingMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMappingMetadata", + federationId: "", +}; + +export const CreateGroupMappingMetadata = { + $type: + "yandex.cloud.organizationmanager.v1.CreateGroupMappingMetadata" as const, + + encode( + message: CreateGroupMappingMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGroupMappingMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateGroupMappingMetadata, + } as CreateGroupMappingMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGroupMappingMetadata { + const message = { + ...baseCreateGroupMappingMetadata, + } as CreateGroupMappingMetadata; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: CreateGroupMappingMetadata): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGroupMappingMetadata { + const message = { + ...baseCreateGroupMappingMetadata, + } as CreateGroupMappingMetadata; + message.federationId = object.federationId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateGroupMappingMetadata.$type, + CreateGroupMappingMetadata +); + +const baseUpdateGroupMappingRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingRequest", + federationId: "", + enabled: false, +}; + +export const UpdateGroupMappingRequest = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMappingRequest" as const, + + encode( + message: UpdateGroupMappingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.enabled === true) { + writer.uint32(24).bool(message.enabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMappingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMappingRequest, + } as UpdateGroupMappingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.enabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMappingRequest { + const message = { + ...baseUpdateGroupMappingRequest, + } as UpdateGroupMappingRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? 
FieldMask.fromJSON(object.updateMask) + : undefined; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + return message; + }, + + toJSON(message: UpdateGroupMappingRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.enabled !== undefined && (obj.enabled = message.enabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMappingRequest { + const message = { + ...baseUpdateGroupMappingRequest, + } as UpdateGroupMappingRequest; + message.federationId = object.federationId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.enabled = object.enabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMappingRequest.$type, + UpdateGroupMappingRequest +); + +const baseUpdateGroupMappingMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingMetadata", + federationId: "", +}; + +export const UpdateGroupMappingMetadata = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMappingMetadata" as const, + + encode( + message: UpdateGroupMappingMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMappingMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMappingMetadata, + } as UpdateGroupMappingMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMappingMetadata { + const message = { + ...baseUpdateGroupMappingMetadata, + } as UpdateGroupMappingMetadata; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: UpdateGroupMappingMetadata): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMappingMetadata { + const message = { + ...baseUpdateGroupMappingMetadata, + } as UpdateGroupMappingMetadata; + message.federationId = object.federationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMappingMetadata.$type, + UpdateGroupMappingMetadata +); + +const baseDeleteGroupMappingRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMappingRequest", + federationId: "", +}; + +export const DeleteGroupMappingRequest = { + $type: + "yandex.cloud.organizationmanager.v1.DeleteGroupMappingRequest" as const, + + encode( + message: DeleteGroupMappingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGroupMappingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteGroupMappingRequest, + } as DeleteGroupMappingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGroupMappingRequest { + const message = { + ...baseDeleteGroupMappingRequest, + } as DeleteGroupMappingRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: DeleteGroupMappingRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGroupMappingRequest { + const message = { + ...baseDeleteGroupMappingRequest, + } as DeleteGroupMappingRequest; + message.federationId = object.federationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteGroupMappingRequest.$type, + DeleteGroupMappingRequest +); + +const baseDeleteGroupMappingMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMappingMetadata", + federationId: "", +}; + +export const DeleteGroupMappingMetadata = { + $type: + "yandex.cloud.organizationmanager.v1.DeleteGroupMappingMetadata" as const, + + encode( + message: DeleteGroupMappingMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGroupMappingMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteGroupMappingMetadata, + } as DeleteGroupMappingMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGroupMappingMetadata { + const message = { + ...baseDeleteGroupMappingMetadata, + } as DeleteGroupMappingMetadata; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: DeleteGroupMappingMetadata): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGroupMappingMetadata { + const message = { + ...baseDeleteGroupMappingMetadata, + } as DeleteGroupMappingMetadata; + message.federationId = object.federationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteGroupMappingMetadata.$type, + DeleteGroupMappingMetadata +); + +const baseUpdateGroupMappingItemsRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsRequest", + federationId: "", +}; + +export const UpdateGroupMappingItemsRequest = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsRequest" as const, + + encode( + message: UpdateGroupMappingItemsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + for (const v of message.groupMappingItemDeltas) { + GroupMappingItemDelta.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMappingItemsRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMappingItemsRequest, + } as UpdateGroupMappingItemsRequest; + message.groupMappingItemDeltas = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + case 4: + message.groupMappingItemDeltas.push( + GroupMappingItemDelta.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMappingItemsRequest { + const message = { + ...baseUpdateGroupMappingItemsRequest, + } as UpdateGroupMappingItemsRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + message.groupMappingItemDeltas = (object.groupMappingItemDeltas ?? []).map( + (e: any) => GroupMappingItemDelta.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateGroupMappingItemsRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + if (message.groupMappingItemDeltas) { + obj.groupMappingItemDeltas = message.groupMappingItemDeltas.map((e) => + e ? GroupMappingItemDelta.toJSON(e) : undefined + ); + } else { + obj.groupMappingItemDeltas = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMappingItemsRequest { + const message = { + ...baseUpdateGroupMappingItemsRequest, + } as UpdateGroupMappingItemsRequest; + message.federationId = object.federationId ?? 
""; + message.groupMappingItemDeltas = + object.groupMappingItemDeltas?.map((e) => + GroupMappingItemDelta.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMappingItemsRequest.$type, + UpdateGroupMappingItemsRequest +); + +const baseGroupMappingItemDelta: object = { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItemDelta", + action: 0, +}; + +export const GroupMappingItemDelta = { + $type: "yandex.cloud.organizationmanager.v1.GroupMappingItemDelta" as const, + + encode( + message: GroupMappingItemDelta, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.item !== undefined) { + GroupMappingItem.encode(message.item, writer.uint32(10).fork()).ldelim(); + } + if (message.action !== 0) { + writer.uint32(16).int32(message.action); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GroupMappingItemDelta { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGroupMappingItemDelta } as GroupMappingItemDelta; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.item = GroupMappingItem.decode(reader, reader.uint32()); + break; + case 2: + message.action = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GroupMappingItemDelta { + const message = { ...baseGroupMappingItemDelta } as GroupMappingItemDelta; + message.item = + object.item !== undefined && object.item !== null + ? GroupMappingItem.fromJSON(object.item) + : undefined; + message.action = + object.action !== undefined && object.action !== null + ? 
groupMappingItemDelta_ActionFromJSON(object.action) + : 0; + return message; + }, + + toJSON(message: GroupMappingItemDelta): unknown { + const obj: any = {}; + message.item !== undefined && + (obj.item = message.item + ? GroupMappingItem.toJSON(message.item) + : undefined); + message.action !== undefined && + (obj.action = groupMappingItemDelta_ActionToJSON(message.action)); + return obj; + }, + + fromPartial, I>>( + object: I + ): GroupMappingItemDelta { + const message = { ...baseGroupMappingItemDelta } as GroupMappingItemDelta; + message.item = + object.item !== undefined && object.item !== null + ? GroupMappingItem.fromPartial(object.item) + : undefined; + message.action = object.action ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(GroupMappingItemDelta.$type, GroupMappingItemDelta); + +const baseUpdateGroupMappingItemsMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsMetadata", + federationId: "", +}; + +export const UpdateGroupMappingItemsMetadata = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsMetadata" as const, + + encode( + message: UpdateGroupMappingItemsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMappingItemsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMappingItemsMetadata, + } as UpdateGroupMappingItemsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMappingItemsMetadata { + const message = { + ...baseUpdateGroupMappingItemsMetadata, + } as UpdateGroupMappingItemsMetadata; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + return message; + }, + + toJSON(message: UpdateGroupMappingItemsMetadata): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMappingItemsMetadata { + const message = { + ...baseUpdateGroupMappingItemsMetadata, + } as UpdateGroupMappingItemsMetadata; + message.federationId = object.federationId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMappingItemsMetadata.$type, + UpdateGroupMappingItemsMetadata +); + +const baseUpdateGroupMappingItemsResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsResponse", +}; + +export const UpdateGroupMappingItemsResponse = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMappingItemsResponse" as const, + + encode( + message: UpdateGroupMappingItemsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.groupMappingItemDeltas) { + GroupMappingItemDelta.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMappingItemsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMappingItemsResponse, + } as UpdateGroupMappingItemsResponse; + message.groupMappingItemDeltas = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 4: + message.groupMappingItemDeltas.push( + GroupMappingItemDelta.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMappingItemsResponse { + const message = { + ...baseUpdateGroupMappingItemsResponse, + } as UpdateGroupMappingItemsResponse; + message.groupMappingItemDeltas = (object.groupMappingItemDeltas ?? []).map( + (e: any) => GroupMappingItemDelta.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateGroupMappingItemsResponse): unknown { + const obj: any = {}; + if (message.groupMappingItemDeltas) { + obj.groupMappingItemDeltas = message.groupMappingItemDeltas.map((e) => + e ? GroupMappingItemDelta.toJSON(e) : undefined + ); + } else { + obj.groupMappingItemDeltas = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMappingItemsResponse { + const message = { + ...baseUpdateGroupMappingItemsResponse, + } as UpdateGroupMappingItemsResponse; + message.groupMappingItemDeltas = + object.groupMappingItemDeltas?.map((e) => + GroupMappingItemDelta.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMappingItemsResponse.$type, + UpdateGroupMappingItemsResponse +); + +const baseListGroupMappingItemsRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsRequest", + federationId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListGroupMappingItemsRequest = { + $type: + "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsRequest" as const, + + encode( + message: ListGroupMappingItemsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if 
(message.federationId !== "") { + writer.uint32(10).string(message.federationId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupMappingItemsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGroupMappingItemsRequest, + } as ListGroupMappingItemsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.federationId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupMappingItemsRequest { + const message = { + ...baseListGroupMappingItemsRequest, + } as ListGroupMappingItemsRequest; + message.federationId = + object.federationId !== undefined && object.federationId !== null + ? String(object.federationId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? 
String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListGroupMappingItemsRequest): unknown { + const obj: any = {}; + message.federationId !== undefined && + (obj.federationId = message.federationId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupMappingItemsRequest { + const message = { + ...baseListGroupMappingItemsRequest, + } as ListGroupMappingItemsRequest; + message.federationId = object.federationId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGroupMappingItemsRequest.$type, + ListGroupMappingItemsRequest +); + +const baseListGroupMappingItemsResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsResponse", + nextPageToken: "", +}; + +export const ListGroupMappingItemsResponse = { + $type: + "yandex.cloud.organizationmanager.v1.ListGroupMappingItemsResponse" as const, + + encode( + message: ListGroupMappingItemsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.groupMappingItems) { + GroupMappingItem.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupMappingItemsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListGroupMappingItemsResponse, + } as ListGroupMappingItemsResponse; + message.groupMappingItems = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupMappingItems.push( + GroupMappingItem.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupMappingItemsResponse { + const message = { + ...baseListGroupMappingItemsResponse, + } as ListGroupMappingItemsResponse; + message.groupMappingItems = (object.groupMappingItems ?? []).map((e: any) => + GroupMappingItem.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupMappingItemsResponse): unknown { + const obj: any = {}; + if (message.groupMappingItems) { + obj.groupMappingItems = message.groupMappingItems.map((e) => + e ? GroupMappingItem.toJSON(e) : undefined + ); + } else { + obj.groupMappingItems = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupMappingItemsResponse { + const message = { + ...baseListGroupMappingItemsResponse, + } as ListGroupMappingItemsResponse; + message.groupMappingItems = + object.groupMappingItems?.map((e) => GroupMappingItem.fromPartial(e)) || + []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGroupMappingItemsResponse.$type, + ListGroupMappingItemsResponse +); + +/** RPC service dedicated for federation group mapping. 
*/ +export const GroupMappingServiceService = { + /** + * Returns a group mapping configured for the specific federation + * If a federation does not exist this call will return an error + * NOT_FOUND will be returned + * If a federation exist, but has not ever been configured for group mapping + * the call FAILED_PRECONDITION will be returned. + */ + get: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetGroupMappingRequest) => + Buffer.from(GetGroupMappingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetGroupMappingRequest.decode(value), + responseSerialize: (value: GetGroupMappingResponse) => + Buffer.from(GetGroupMappingResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + GetGroupMappingResponse.decode(value), + }, + /** + * Adds a group mapping for a federation + * If mapping already exist, ALREADY_EXISTS will be returned + */ + create: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateGroupMappingRequest) => + Buffer.from(CreateGroupMappingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateGroupMappingRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Updates an existing group mapping for a federation + * Errors: + * - if federation is not found + * In case of any error, no changes are applied to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - enabling when already enabled + * - disabling when disabled + * Such parts of request will be ignored. Others will be applied. 
+ */ + update: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGroupMappingRequest) => + Buffer.from(UpdateGroupMappingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateGroupMappingRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Deletes a group mapping. This will remove all the mapping items + * cascade. + */ + delete: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteGroupMappingRequest) => + Buffer.from(DeleteGroupMappingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteGroupMappingRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Returns all the group mappings items + * + * Filtering is only supported by external_group_id or internal_group_id + */ + listItems: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/ListItems", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGroupMappingItemsRequest) => + Buffer.from(ListGroupMappingItemsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGroupMappingItemsRequest.decode(value), + responseSerialize: (value: ListGroupMappingItemsResponse) => + Buffer.from(ListGroupMappingItemsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGroupMappingItemsResponse.decode(value), + }, + /** + * Updates group mapping items for a specified federation + * Errors: + * - if federation is not found + * - if internal group in the mapping added does not exist + * 
In case of any error, no changes are applied to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - adding group mapping items that are already present + * - removing group mapping items that are not present + * Such parts of request will be ignored. Others will be applied. + */ + updateItems: { + path: "/yandex.cloud.organizationmanager.v1.GroupMappingService/UpdateItems", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGroupMappingItemsRequest) => + Buffer.from(UpdateGroupMappingItemsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateGroupMappingItemsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface GroupMappingServiceServer + extends UntypedServiceImplementation { + /** + * Returns a group mapping configured for the specific federation + * If a federation does not exist this call will return an error + * NOT_FOUND will be returned + * If a federation exist, but has not ever been configured for group mapping + * the call FAILED_PRECONDITION will be returned. + */ + get: handleUnaryCall; + /** + * Adds a group mapping for a federation + * If mapping already exist, ALREADY_EXISTS will be returned + */ + create: handleUnaryCall; + /** + * Updates an existing group mapping for a federation + * Errors: + * - if federation is not found + * In case of any error, no changes are applied to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - enabling when already enabled + * - disabling when disabled + * Such parts of request will be ignored. Others will be applied. + */ + update: handleUnaryCall; + /** + * Deletes a group mapping. This will remove all the mapping items + * cascade. 
+ */ + delete: handleUnaryCall; + /** + * Returns all the group mappings items + * + * Filtering is only supported by external_group_id or internal_group_id + */ + listItems: handleUnaryCall< + ListGroupMappingItemsRequest, + ListGroupMappingItemsResponse + >; + /** + * Updates group mapping items for a specified federation + * Errors: + * - if federation is not found + * - if internal group in the mapping added does not exist + * In case of any error, no changes are applied to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - adding group mapping items that are already present + * - removing group mapping items that are not present + * Such parts of request will be ignored. Others will be applied. + */ + updateItems: handleUnaryCall; +} + +export interface GroupMappingServiceClient extends Client { + /** + * Returns a group mapping configured for the specific federation + * If a federation does not exist this call will return an error + * NOT_FOUND will be returned + * If a federation exist, but has not ever been configured for group mapping + * the call FAILED_PRECONDITION will be returned. 
+ */ + get( + request: GetGroupMappingRequest, + callback: ( + error: ServiceError | null, + response: GetGroupMappingResponse + ) => void + ): ClientUnaryCall; + get( + request: GetGroupMappingRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: GetGroupMappingResponse + ) => void + ): ClientUnaryCall; + get( + request: GetGroupMappingRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: GetGroupMappingResponse + ) => void + ): ClientUnaryCall; + /** + * Adds a group mapping for a federation + * If mapping already exist, ALREADY_EXISTS will be returned + */ + create( + request: CreateGroupMappingRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGroupMappingRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGroupMappingRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Updates an existing group mapping for a federation + * Errors: + * - if federation is not found + * In case of any error, no changes are applied to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - enabling when already enabled + * - disabling when disabled + * Such parts of request will be ignored. Others will be applied. 
+ */ + update( + request: UpdateGroupMappingRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGroupMappingRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGroupMappingRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Deletes a group mapping. This will remove all the mapping items + * cascade. + */ + delete( + request: DeleteGroupMappingRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGroupMappingRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGroupMappingRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Returns all the group mappings items + * + * Filtering is only supported by external_group_id or internal_group_id + */ + listItems( + request: ListGroupMappingItemsRequest, + callback: ( + error: ServiceError | null, + response: ListGroupMappingItemsResponse + ) => void + ): ClientUnaryCall; + listItems( + request: ListGroupMappingItemsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGroupMappingItemsResponse + ) => void + ): ClientUnaryCall; + listItems( + request: ListGroupMappingItemsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGroupMappingItemsResponse + ) => void + ): ClientUnaryCall; + /** + * Updates group mapping items for a specified federation + * Errors: + * - if federation is not found + * - if internal group in the mapping added does not exist + * In case of any error, no changes are applied 
to existing group mapping + * + * This call is idempotent. The following actions do nothing: + * - adding group mapping items that are already present + * - removing group mapping items that are not present + * Such parts of request will be ignored. Others will be applied. + */ + updateItems( + request: UpdateGroupMappingItemsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateItems( + request: UpdateGroupMappingItemsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateItems( + request: UpdateGroupMappingItemsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const GroupMappingServiceClient = makeGenericClientConstructor( + GroupMappingServiceService, + "yandex.cloud.organizationmanager.v1.GroupMappingService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): GroupMappingServiceClient; + service: typeof GroupMappingServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/organizationmanager/v1/saml/federation.ts b/src/generated/yandex/cloud/organizationmanager/v1/saml/federation.ts index 01266a35..ba7634c7 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/saml/federation.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/saml/federation.ts @@ -56,7 +56,7 @@ export function bindingTypeToJSON(object: BindingType): string { /** * A federation. - * For more information, see [SAML-compatible identity federations](/docs/iam/concepts/users/identity-federations). + * For more information, see [SAML-compatible identity federations](/docs/iam/concepts/federations). */ export interface Federation { $type: "yandex.cloud.organizationmanager.v1.saml.Federation"; @@ -121,6 +121,8 @@ export interface FederationSecuritySettings { $type: "yandex.cloud.organizationmanager.v1.saml.FederationSecuritySettings"; /** Enable encrypted assertions. */ encryptedAssertions: boolean; + /** Value parameter ForceAuthn in SAMLRequest. 
*/ + forceAuthn: boolean; } const baseFederation: object = { @@ -475,6 +477,7 @@ messageTypeRegistry.set(Federation_LabelsEntry.$type, Federation_LabelsEntry); const baseFederationSecuritySettings: object = { $type: "yandex.cloud.organizationmanager.v1.saml.FederationSecuritySettings", encryptedAssertions: false, + forceAuthn: false, }; export const FederationSecuritySettings = { @@ -488,6 +491,9 @@ export const FederationSecuritySettings = { if (message.encryptedAssertions === true) { writer.uint32(8).bool(message.encryptedAssertions); } + if (message.forceAuthn === true) { + writer.uint32(16).bool(message.forceAuthn); + } return writer; }, @@ -506,6 +512,9 @@ export const FederationSecuritySettings = { case 1: message.encryptedAssertions = reader.bool(); break; + case 2: + message.forceAuthn = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -523,6 +532,10 @@ export const FederationSecuritySettings = { object.encryptedAssertions !== null ? Boolean(object.encryptedAssertions) : false; + message.forceAuthn = + object.forceAuthn !== undefined && object.forceAuthn !== null + ? Boolean(object.forceAuthn) + : false; return message; }, @@ -530,6 +543,7 @@ export const FederationSecuritySettings = { const obj: any = {}; message.encryptedAssertions !== undefined && (obj.encryptedAssertions = message.encryptedAssertions); + message.forceAuthn !== undefined && (obj.forceAuthn = message.forceAuthn); return obj; }, @@ -540,6 +554,7 @@ export const FederationSecuritySettings = { ...baseFederationSecuritySettings, } as FederationSecuritySettings; message.encryptedAssertions = object.encryptedAssertions ?? false; + message.forceAuthn = object.forceAuthn ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/organizationmanager/v1/saml/federation_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/saml/federation_service.ts index 859cf493..19649a43 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/saml/federation_service.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/saml/federation_service.ts @@ -274,6 +274,15 @@ export interface ListFederatedUserAccountsRequest { * returned by a previous list request. */ pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. Currently you can use filtering only on the [name_id] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 1-1000 characters long and match the regular expression + * `[a-z0-9A-Z/@_.\-=+*\\]+`. + */ + filter: string; } export interface ListFederatedUserAccountsResponse { @@ -1845,6 +1854,7 @@ const baseListFederatedUserAccountsRequest: object = { federationId: "", pageSize: 0, pageToken: "", + filter: "", }; export const ListFederatedUserAccountsRequest = { @@ -1864,6 +1874,9 @@ export const ListFederatedUserAccountsRequest = { if (message.pageToken !== "") { writer.uint32(26).string(message.pageToken); } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } return writer; }, @@ -1888,6 +1901,9 @@ export const ListFederatedUserAccountsRequest = { case 3: message.pageToken = reader.string(); break; + case 4: + message.filter = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1912,6 +1928,10 @@ export const ListFederatedUserAccountsRequest = { object.pageToken !== undefined && object.pageToken !== null ? String(object.pageToken) : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? 
String(object.filter) + : ""; return message; }, @@ -1922,6 +1942,7 @@ export const ListFederatedUserAccountsRequest = { message.pageSize !== undefined && (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); return obj; }, @@ -1934,6 +1955,7 @@ export const ListFederatedUserAccountsRequest = { message.federationId = object.federationId ?? ""; message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/organizationmanager/v1/ssh_certificate_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/ssh_certificate_service.ts new file mode 100644 index 00000000..8e99e8ee --- /dev/null +++ b/src/generated/yandex/cloud/organizationmanager/v1/ssh_certificate_service.ts @@ -0,0 +1,344 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.organizationmanager.v1"; + +export interface GenerateSshCertificateRequest { + $type: "yandex.cloud.organizationmanager.v1.GenerateSshCertificateRequest"; + /** the cloud must be attached to an organization */ + cloudId: string | undefined; + organizationId: string | undefined; + /** specify subject to generate certificate for default login */ + subjectId: string | undefined; + /** specify os_login for a specific login */ + osLogin: string | undefined; + publicKey: string; +} + +export interface GenerateSshCertificateResponse { + $type: "yandex.cloud.organizationmanager.v1.GenerateSshCertificateResponse"; + 
/** as per specification https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?annotate=HEAD */ + signedCertificate: string; +} + +const baseGenerateSshCertificateRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.GenerateSshCertificateRequest", + publicKey: "", +}; + +export const GenerateSshCertificateRequest = { + $type: + "yandex.cloud.organizationmanager.v1.GenerateSshCertificateRequest" as const, + + encode( + message: GenerateSshCertificateRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cloudId !== undefined) { + writer.uint32(10).string(message.cloudId); + } + if (message.organizationId !== undefined) { + writer.uint32(18).string(message.organizationId); + } + if (message.subjectId !== undefined) { + writer.uint32(26).string(message.subjectId); + } + if (message.osLogin !== undefined) { + writer.uint32(34).string(message.osLogin); + } + if (message.publicKey !== "") { + writer.uint32(42).string(message.publicKey); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GenerateSshCertificateRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGenerateSshCertificateRequest, + } as GenerateSshCertificateRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cloudId = reader.string(); + break; + case 2: + message.organizationId = reader.string(); + break; + case 3: + message.subjectId = reader.string(); + break; + case 4: + message.osLogin = reader.string(); + break; + case 5: + message.publicKey = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GenerateSshCertificateRequest { + const message = { + ...baseGenerateSshCertificateRequest, + } as GenerateSshCertificateRequest; + message.cloudId = + object.cloudId !== undefined && object.cloudId !== null + ? String(object.cloudId) + : undefined; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? String(object.organizationId) + : undefined; + message.subjectId = + object.subjectId !== undefined && object.subjectId !== null + ? String(object.subjectId) + : undefined; + message.osLogin = + object.osLogin !== undefined && object.osLogin !== null + ? String(object.osLogin) + : undefined; + message.publicKey = + object.publicKey !== undefined && object.publicKey !== null + ? 
String(object.publicKey) + : ""; + return message; + }, + + toJSON(message: GenerateSshCertificateRequest): unknown { + const obj: any = {}; + message.cloudId !== undefined && (obj.cloudId = message.cloudId); + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + message.subjectId !== undefined && (obj.subjectId = message.subjectId); + message.osLogin !== undefined && (obj.osLogin = message.osLogin); + message.publicKey !== undefined && (obj.publicKey = message.publicKey); + return obj; + }, + + fromPartial, I>>( + object: I + ): GenerateSshCertificateRequest { + const message = { + ...baseGenerateSshCertificateRequest, + } as GenerateSshCertificateRequest; + message.cloudId = object.cloudId ?? undefined; + message.organizationId = object.organizationId ?? undefined; + message.subjectId = object.subjectId ?? undefined; + message.osLogin = object.osLogin ?? undefined; + message.publicKey = object.publicKey ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GenerateSshCertificateRequest.$type, + GenerateSshCertificateRequest +); + +const baseGenerateSshCertificateResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.GenerateSshCertificateResponse", + signedCertificate: "", +}; + +export const GenerateSshCertificateResponse = { + $type: + "yandex.cloud.organizationmanager.v1.GenerateSshCertificateResponse" as const, + + encode( + message: GenerateSshCertificateResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.signedCertificate !== "") { + writer.uint32(10).string(message.signedCertificate); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GenerateSshCertificateResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGenerateSshCertificateResponse, + } as GenerateSshCertificateResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signedCertificate = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GenerateSshCertificateResponse { + const message = { + ...baseGenerateSshCertificateResponse, + } as GenerateSshCertificateResponse; + message.signedCertificate = + object.signedCertificate !== undefined && + object.signedCertificate !== null + ? String(object.signedCertificate) + : ""; + return message; + }, + + toJSON(message: GenerateSshCertificateResponse): unknown { + const obj: any = {}; + message.signedCertificate !== undefined && + (obj.signedCertificate = message.signedCertificate); + return obj; + }, + + fromPartial, I>>( + object: I + ): GenerateSshCertificateResponse { + const message = { + ...baseGenerateSshCertificateResponse, + } as GenerateSshCertificateResponse; + message.signedCertificate = object.signedCertificate ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + GenerateSshCertificateResponse.$type, + GenerateSshCertificateResponse +); + +export const SshCertificateServiceService = { + /** + * Members of an organization can generate certificates for themselves + * Signing certificates for other users requires a special permission + */ + generate: { + path: "/yandex.cloud.organizationmanager.v1.SshCertificateService/Generate", + requestStream: false, + responseStream: false, + requestSerialize: (value: GenerateSshCertificateRequest) => + Buffer.from(GenerateSshCertificateRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GenerateSshCertificateRequest.decode(value), + responseSerialize: (value: GenerateSshCertificateResponse) => + Buffer.from(GenerateSshCertificateResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + GenerateSshCertificateResponse.decode(value), + }, +} as const; + +export interface SshCertificateServiceServer + extends UntypedServiceImplementation { + /** + * Members of an organization can generate certificates for themselves + * Signing certificates for other users requires a special permission + */ + generate: handleUnaryCall< + GenerateSshCertificateRequest, + GenerateSshCertificateResponse + >; +} + +export interface SshCertificateServiceClient extends Client { + /** + * Members of an organization can generate certificates for themselves + * Signing certificates for other users requires a special permission + */ + generate( + request: GenerateSshCertificateRequest, + callback: ( + error: ServiceError | null, + response: GenerateSshCertificateResponse + ) => void + ): ClientUnaryCall; + generate( + request: GenerateSshCertificateRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: GenerateSshCertificateResponse + ) => void + ): ClientUnaryCall; + generate( + request: GenerateSshCertificateRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: 
ServiceError | null, + response: GenerateSshCertificateResponse + ) => void + ): ClientUnaryCall; +} + +export const SshCertificateServiceClient = makeGenericClientConstructor( + SshCertificateServiceService, + "yandex.cloud.organizationmanager.v1.SshCertificateService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): SshCertificateServiceClient; + service: typeof SshCertificateServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts index ad0d2381..c03d8489 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts @@ -2,6 +2,11 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; +import { + LogLevel_Level, + logLevel_LevelFromJSON, + logLevel_LevelToJSON, +} from "../../../../../yandex/cloud/logging/v1/log_entry"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.serverless.apigateway.v1"; @@ -30,6 +35,12 @@ export interface ApiGateway { attachedDomains: AttachedDomain[]; /** Network access. If specified the gateway will be attached to specified network/subnet(s). 
*/ connectivity?: Connectivity; + /** Options for logging from the API gateway. */ + logOptions?: LogOptions; + /** Values of variables defined in the specification. */ + variables: { [key: string]: VariableInput }; + /** Canary release of the gateway. */ + canary?: Canary; } export enum ApiGateway_Status { @@ -99,6 +110,12 @@ export interface ApiGateway_LabelsEntry { value: string; } +export interface ApiGateway_VariablesEntry { + $type: "yandex.cloud.serverless.apigateway.v1.ApiGateway.VariablesEntry"; + key: string; + value?: VariableInput; +} + export interface AttachedDomain { $type: "yandex.cloud.serverless.apigateway.v1.AttachedDomain"; /** ID of the domain. */ @@ -126,6 +143,44 @@ export interface Connectivity { subnetId: string[]; } +export interface LogOptions { + $type: "yandex.cloud.serverless.apigateway.v1.LogOptions"; + /** Is logging from API gateway disabled. */ + disabled: boolean; + /** Entry should be written to log group resolved by ID. */ + logGroupId: string | undefined; + /** Entry should be written to default log group for specified folder. */ + folderId: string | undefined; + /** + * Minimum log entry level. + * + * See [LogLevel.Level] for details. + */ + minLevel: LogLevel_Level; +} + +export interface Canary { + $type: "yandex.cloud.serverless.apigateway.v1.Canary"; + /** It describes percentage of requests, which will be processed by canary. */ + weight: number; + /** Values specification variables, associated with canary. 
*/ + variables: { [key: string]: VariableInput }; +} + +export interface Canary_VariablesEntry { + $type: "yandex.cloud.serverless.apigateway.v1.Canary.VariablesEntry"; + key: string; + value?: VariableInput; +} + +export interface VariableInput { + $type: "yandex.cloud.serverless.apigateway.v1.VariableInput"; + stringValue: string | undefined; + intValue: number | undefined; + doubleValue: number | undefined; + boolValue: boolean | undefined; +} + const baseApiGateway: object = { $type: "yandex.cloud.serverless.apigateway.v1.ApiGateway", id: "", @@ -190,6 +245,23 @@ export const ApiGateway = { writer.uint32(98).fork() ).ldelim(); } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(106).fork()).ldelim(); + } + Object.entries(message.variables).forEach(([key, value]) => { + ApiGateway_VariablesEntry.encode( + { + $type: + "yandex.cloud.serverless.apigateway.v1.ApiGateway.VariablesEntry", + key: key as any, + value, + }, + writer.uint32(114).fork() + ).ldelim(); + }); + if (message.canary !== undefined) { + Canary.encode(message.canary, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -199,6 +271,7 @@ export const ApiGateway = { const message = { ...baseApiGateway } as ApiGateway; message.labels = {}; message.attachedDomains = []; + message.variables = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -242,6 +315,21 @@ export const ApiGateway = { case 12: message.connectivity = Connectivity.decode(reader, reader.uint32()); break; + case 13: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 14: + const entry14 = ApiGateway_VariablesEntry.decode( + reader, + reader.uint32() + ); + if (entry14.value !== undefined) { + message.variables[entry14.key] = entry14.value; + } + break; + case 15: + message.canary = Canary.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -295,6 +383,20 @@ export const ApiGateway = { 
object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromJSON(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + acc[key] = VariableInput.fromJSON(value); + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? Canary.fromJSON(object.canary) + : undefined; return message; }, @@ -328,6 +430,18 @@ export const ApiGateway = { (obj.connectivity = message.connectivity ? Connectivity.toJSON(message.connectivity) : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + obj.variables = {}; + if (message.variables) { + Object.entries(message.variables).forEach(([k, v]) => { + obj.variables[k] = VariableInput.toJSON(v); + }); + } + message.canary !== undefined && + (obj.canary = message.canary ? Canary.toJSON(message.canary) : undefined); return obj; }, @@ -357,6 +471,22 @@ export const ApiGateway = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromPartial(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = VariableInput.fromPartial(value); + } + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? 
Canary.fromPartial(object.canary) + : undefined; return message; }, }; @@ -440,6 +570,97 @@ export const ApiGateway_LabelsEntry = { messageTypeRegistry.set(ApiGateway_LabelsEntry.$type, ApiGateway_LabelsEntry); +const baseApiGateway_VariablesEntry: object = { + $type: "yandex.cloud.serverless.apigateway.v1.ApiGateway.VariablesEntry", + key: "", +}; + +export const ApiGateway_VariablesEntry = { + $type: + "yandex.cloud.serverless.apigateway.v1.ApiGateway.VariablesEntry" as const, + + encode( + message: ApiGateway_VariablesEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== undefined) { + VariableInput.encode(message.value, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ApiGateway_VariablesEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseApiGateway_VariablesEntry, + } as ApiGateway_VariablesEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = VariableInput.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ApiGateway_VariablesEntry { + const message = { + ...baseApiGateway_VariablesEntry, + } as ApiGateway_VariablesEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
VariableInput.fromJSON(object.value) + : undefined; + return message; + }, + + toJSON(message: ApiGateway_VariablesEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && + (obj.value = message.value + ? VariableInput.toJSON(message.value) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ApiGateway_VariablesEntry { + const message = { + ...baseApiGateway_VariablesEntry, + } as ApiGateway_VariablesEntry; + message.key = object.key ?? ""; + message.value = + object.value !== undefined && object.value !== null + ? VariableInput.fromPartial(object.value) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ApiGateway_VariablesEntry.$type, + ApiGateway_VariablesEntry +); + const baseAttachedDomain: object = { $type: "yandex.cloud.serverless.apigateway.v1.AttachedDomain", domainId: "", @@ -619,6 +840,395 @@ export const Connectivity = { messageTypeRegistry.set(Connectivity.$type, Connectivity); +const baseLogOptions: object = { + $type: "yandex.cloud.serverless.apigateway.v1.LogOptions", + disabled: false, + minLevel: 0, +}; + +export const LogOptions = { + $type: "yandex.cloud.serverless.apigateway.v1.LogOptions" as const, + + encode( + message: LogOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.disabled === true) { + writer.uint32(8).bool(message.disabled); + } + if (message.logGroupId !== undefined) { + writer.uint32(18).string(message.logGroupId); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.minLevel !== 0) { + writer.uint32(32).int32(message.minLevel); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogOptions } as LogOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.disabled = reader.bool(); + break; + case 2: + message.logGroupId = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.minLevel = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + message.logGroupId = + object.logGroupId !== undefined && object.logGroupId !== null + ? String(object.logGroupId) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.minLevel = + object.minLevel !== undefined && object.minLevel !== null + ? logLevel_LevelFromJSON(object.minLevel) + : 0; + return message; + }, + + toJSON(message: LogOptions): unknown { + const obj: any = {}; + message.disabled !== undefined && (obj.disabled = message.disabled); + message.logGroupId !== undefined && (obj.logGroupId = message.logGroupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.minLevel !== undefined && + (obj.minLevel = logLevel_LevelToJSON(message.minLevel)); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = object.disabled ?? false; + message.logGroupId = object.logGroupId ?? undefined; + message.folderId = object.folderId ?? undefined; + message.minLevel = object.minLevel ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(LogOptions.$type, LogOptions); + +const baseCanary: object = { + $type: "yandex.cloud.serverless.apigateway.v1.Canary", + weight: 0, +}; + +export const Canary = { + $type: "yandex.cloud.serverless.apigateway.v1.Canary" as const, + + encode( + message: Canary, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.weight !== 0) { + writer.uint32(8).int64(message.weight); + } + Object.entries(message.variables).forEach(([key, value]) => { + Canary_VariablesEntry.encode( + { + $type: "yandex.cloud.serverless.apigateway.v1.Canary.VariablesEntry", + key: key as any, + value, + }, + writer.uint32(18).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Canary { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCanary } as Canary; + message.variables = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.weight = longToNumber(reader.int64() as Long); + break; + case 2: + const entry2 = Canary_VariablesEntry.decode(reader, reader.uint32()); + if (entry2.value !== undefined) { + message.variables[entry2.key] = entry2.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Canary { + const message = { ...baseCanary } as Canary; + message.weight = + object.weight !== undefined && object.weight !== null + ? Number(object.weight) + : 0; + message.variables = Object.entries(object.variables ?? 
{}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + acc[key] = VariableInput.fromJSON(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: Canary): unknown { + const obj: any = {}; + message.weight !== undefined && (obj.weight = Math.round(message.weight)); + obj.variables = {}; + if (message.variables) { + Object.entries(message.variables).forEach(([k, v]) => { + obj.variables[k] = VariableInput.toJSON(v); + }); + } + return obj; + }, + + fromPartial, I>>(object: I): Canary { + const message = { ...baseCanary } as Canary; + message.weight = object.weight ?? 0; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = VariableInput.fromPartial(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(Canary.$type, Canary); + +const baseCanary_VariablesEntry: object = { + $type: "yandex.cloud.serverless.apigateway.v1.Canary.VariablesEntry", + key: "", +}; + +export const Canary_VariablesEntry = { + $type: "yandex.cloud.serverless.apigateway.v1.Canary.VariablesEntry" as const, + + encode( + message: Canary_VariablesEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== undefined) { + VariableInput.encode(message.value, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Canary_VariablesEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCanary_VariablesEntry } as Canary_VariablesEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = VariableInput.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Canary_VariablesEntry { + const message = { ...baseCanary_VariablesEntry } as Canary_VariablesEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? VariableInput.fromJSON(object.value) + : undefined; + return message; + }, + + toJSON(message: Canary_VariablesEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && + (obj.value = message.value + ? VariableInput.toJSON(message.value) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Canary_VariablesEntry { + const message = { ...baseCanary_VariablesEntry } as Canary_VariablesEntry; + message.key = object.key ?? ""; + message.value = + object.value !== undefined && object.value !== null + ? 
VariableInput.fromPartial(object.value) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Canary_VariablesEntry.$type, Canary_VariablesEntry); + +const baseVariableInput: object = { + $type: "yandex.cloud.serverless.apigateway.v1.VariableInput", +}; + +export const VariableInput = { + $type: "yandex.cloud.serverless.apigateway.v1.VariableInput" as const, + + encode( + message: VariableInput, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.stringValue !== undefined) { + writer.uint32(10).string(message.stringValue); + } + if (message.intValue !== undefined) { + writer.uint32(16).int64(message.intValue); + } + if (message.doubleValue !== undefined) { + writer.uint32(25).double(message.doubleValue); + } + if (message.boolValue !== undefined) { + writer.uint32(32).bool(message.boolValue); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): VariableInput { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseVariableInput } as VariableInput; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringValue = reader.string(); + break; + case 2: + message.intValue = longToNumber(reader.int64() as Long); + break; + case 3: + message.doubleValue = reader.double(); + break; + case 4: + message.boolValue = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): VariableInput { + const message = { ...baseVariableInput } as VariableInput; + message.stringValue = + object.stringValue !== undefined && object.stringValue !== null + ? String(object.stringValue) + : undefined; + message.intValue = + object.intValue !== undefined && object.intValue !== null + ? 
Number(object.intValue) + : undefined; + message.doubleValue = + object.doubleValue !== undefined && object.doubleValue !== null + ? Number(object.doubleValue) + : undefined; + message.boolValue = + object.boolValue !== undefined && object.boolValue !== null + ? Boolean(object.boolValue) + : undefined; + return message; + }, + + toJSON(message: VariableInput): unknown { + const obj: any = {}; + message.stringValue !== undefined && + (obj.stringValue = message.stringValue); + message.intValue !== undefined && + (obj.intValue = Math.round(message.intValue)); + message.doubleValue !== undefined && + (obj.doubleValue = message.doubleValue); + message.boolValue !== undefined && (obj.boolValue = message.boolValue); + return obj; + }, + + fromPartial, I>>( + object: I + ): VariableInput { + const message = { ...baseVariableInput } as VariableInput; + message.stringValue = object.stringValue ?? undefined; + message.intValue = object.intValue ?? undefined; + message.doubleValue = object.doubleValue ?? undefined; + message.boolValue = object.boolValue ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(VariableInput.$type, VariableInput); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -668,6 +1278,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts index b22c0f8d..d3a8c485 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts @@ -16,6 +16,9 @@ import { import _m0 from "protobufjs/minimal"; import { Connectivity, + LogOptions, + Canary, + VariableInput, ApiGateway, } from "../../../../../yandex/cloud/serverless/apigateway/v1/apigateway"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; @@ -64,7 +67,7 @@ export interface ListApiGatewayRequest { * A filter expression that filters functions listed in the response. * * The expression must specify: - * 1. The field name. Currently filtering can only be applied to the [ApiGateway.name] field. + * 1. The field name. Currently filtering can only be applied to the [ApiGateway.name](index) field. * 2. An `=` operator. * 3. The value in double quotes (`"`). 
Must be 3-63 characters long and match the regular expression `[a-z]([-a-z0-9]{0,61}[a-z0-9])?`. * Example of a filter: `name=my-apigw`. @@ -107,6 +110,12 @@ export interface CreateApiGatewayRequest { openapiSpec: string | undefined; /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). */ connectivity?: Connectivity; + /** Options for logging from the API gateway. */ + logOptions?: LogOptions; + /** Values of variables defined in the specification. */ + variables: { [key: string]: VariableInput }; + /** Canary release of the gateway. */ + canary?: Canary; } export interface CreateApiGatewayRequest_LabelsEntry { @@ -115,6 +124,12 @@ export interface CreateApiGatewayRequest_LabelsEntry { value: string; } +export interface CreateApiGatewayRequest_VariablesEntry { + $type: "yandex.cloud.serverless.apigateway.v1.CreateApiGatewayRequest.VariablesEntry"; + key: string; + value?: VariableInput; +} + export interface UpdateApiGatewayRequest { $type: "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest"; /** @@ -136,13 +151,19 @@ export interface UpdateApiGatewayRequest { * API gateway labels as `key:value` pairs. * * Existing set of labels is completely replaced by the provided set, so if you just want - * to add or remove a label, request the current set of labels with a [ApiGatewayService.Get] request. + * to add or remove a label, request the current set of labels with a [yandex.cloud.serverless.apigateway.v1.ApiGatewayService.Get] request. */ labels: { [key: string]: string }; /** The text of specification, JSON or YAML. */ openapiSpec: string | undefined; /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). */ connectivity?: Connectivity; + /** Options for logging from the API gateway. */ + logOptions?: LogOptions; + /** Values of variables defined in the specification. */ + variables: { [key: string]: VariableInput }; + /** Canary release of the gateway. 
*/ + canary?: Canary; } export interface UpdateApiGatewayRequest_LabelsEntry { @@ -151,6 +172,12 @@ export interface UpdateApiGatewayRequest_LabelsEntry { value: string; } +export interface UpdateApiGatewayRequest_VariablesEntry { + $type: "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest.VariablesEntry"; + key: string; + value?: VariableInput; +} + export interface DeleteApiGatewayRequest { $type: "yandex.cloud.serverless.apigateway.v1.DeleteApiGatewayRequest"; /** @@ -165,12 +192,6 @@ export interface AddDomainRequest { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainRequest"; /** ID of the API gateway that the domain is attached to. */ apiGatewayId: string; - /** - * ID of the attaching domain. - * - * @deprecated - */ - domainId: string; /** Name of the attaching domain. */ domainName: string; /** ID of certificate for the attaching domain. */ @@ -207,10 +228,12 @@ export interface AddDomainMetadata { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainMetadata"; /** ID of the API gateway that the domain is attached to. */ apiGatewayId: string; - /** ID of the attaching domain. */ + /** ID of the attached domain. */ domainId: string; /** Name of the attaching domain. */ domainName: string; + /** ID of the certificate for provided domain. 
*/ + certificateId: string; } export interface RemoveDomainMetadata { @@ -623,6 +646,23 @@ export const CreateApiGatewayRequest = { writer.uint32(50).fork() ).ldelim(); } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(58).fork()).ldelim(); + } + Object.entries(message.variables).forEach(([key, value]) => { + CreateApiGatewayRequest_VariablesEntry.encode( + { + $type: + "yandex.cloud.serverless.apigateway.v1.CreateApiGatewayRequest.VariablesEntry", + key: key as any, + value, + }, + writer.uint32(66).fork() + ).ldelim(); + }); + if (message.canary !== undefined) { + Canary.encode(message.canary, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -636,6 +676,7 @@ export const CreateApiGatewayRequest = { ...baseCreateApiGatewayRequest, } as CreateApiGatewayRequest; message.labels = {}; + message.variables = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -663,6 +704,21 @@ export const CreateApiGatewayRequest = { case 6: message.connectivity = Connectivity.decode(reader, reader.uint32()); break; + case 7: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 8: + const entry8 = CreateApiGatewayRequest_VariablesEntry.decode( + reader, + reader.uint32() + ); + if (entry8.value !== undefined) { + message.variables[entry8.key] = entry8.value; + } + break; + case 9: + message.canary = Canary.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -701,6 +757,20 @@ export const CreateApiGatewayRequest = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromJSON(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? 
{}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + acc[key] = VariableInput.fromJSON(value); + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? Canary.fromJSON(object.canary) + : undefined; return message; }, @@ -722,6 +792,18 @@ export const CreateApiGatewayRequest = { (obj.connectivity = message.connectivity ? Connectivity.toJSON(message.connectivity) : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + obj.variables = {}; + if (message.variables) { + Object.entries(message.variables).forEach(([k, v]) => { + obj.variables[k] = VariableInput.toJSON(v); + }); + } + message.canary !== undefined && + (obj.canary = message.canary ? Canary.toJSON(message.canary) : undefined); return obj; }, @@ -747,6 +829,22 @@ export const CreateApiGatewayRequest = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromPartial(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = VariableInput.fromPartial(value); + } + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? 
Canary.fromPartial(object.canary) + : undefined; return message; }, }; @@ -840,6 +938,98 @@ messageTypeRegistry.set( CreateApiGatewayRequest_LabelsEntry ); +const baseCreateApiGatewayRequest_VariablesEntry: object = { + $type: + "yandex.cloud.serverless.apigateway.v1.CreateApiGatewayRequest.VariablesEntry", + key: "", +}; + +export const CreateApiGatewayRequest_VariablesEntry = { + $type: + "yandex.cloud.serverless.apigateway.v1.CreateApiGatewayRequest.VariablesEntry" as const, + + encode( + message: CreateApiGatewayRequest_VariablesEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== undefined) { + VariableInput.encode(message.value, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateApiGatewayRequest_VariablesEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateApiGatewayRequest_VariablesEntry, + } as CreateApiGatewayRequest_VariablesEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = VariableInput.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateApiGatewayRequest_VariablesEntry { + const message = { + ...baseCreateApiGatewayRequest_VariablesEntry, + } as CreateApiGatewayRequest_VariablesEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
VariableInput.fromJSON(object.value) + : undefined; + return message; + }, + + toJSON(message: CreateApiGatewayRequest_VariablesEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && + (obj.value = message.value + ? VariableInput.toJSON(message.value) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateApiGatewayRequest_VariablesEntry { + const message = { + ...baseCreateApiGatewayRequest_VariablesEntry, + } as CreateApiGatewayRequest_VariablesEntry; + message.key = object.key ?? ""; + message.value = + object.value !== undefined && object.value !== null + ? VariableInput.fromPartial(object.value) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + CreateApiGatewayRequest_VariablesEntry.$type, + CreateApiGatewayRequest_VariablesEntry +); + const baseUpdateApiGatewayRequest: object = { $type: "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest", apiGatewayId: "", @@ -887,6 +1077,23 @@ export const UpdateApiGatewayRequest = { writer.uint32(58).fork() ).ldelim(); } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(66).fork()).ldelim(); + } + Object.entries(message.variables).forEach(([key, value]) => { + UpdateApiGatewayRequest_VariablesEntry.encode( + { + $type: + "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest.VariablesEntry", + key: key as any, + value, + }, + writer.uint32(74).fork() + ).ldelim(); + }); + if (message.canary !== undefined) { + Canary.encode(message.canary, writer.uint32(82).fork()).ldelim(); + } return writer; }, @@ -900,6 +1107,7 @@ export const UpdateApiGatewayRequest = { ...baseUpdateApiGatewayRequest, } as UpdateApiGatewayRequest; message.labels = {}; + message.variables = {}; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -930,6 +1138,21 @@ export const UpdateApiGatewayRequest = { case 7: 
message.connectivity = Connectivity.decode(reader, reader.uint32()); break; + case 8: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 9: + const entry9 = UpdateApiGatewayRequest_VariablesEntry.decode( + reader, + reader.uint32() + ); + if (entry9.value !== undefined) { + message.variables[entry9.key] = entry9.value; + } + break; + case 10: + message.canary = Canary.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -972,6 +1195,20 @@ export const UpdateApiGatewayRequest = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromJSON(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + acc[key] = VariableInput.fromJSON(value); + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? Canary.fromJSON(object.canary) + : undefined; return message; }, @@ -998,6 +1235,18 @@ export const UpdateApiGatewayRequest = { (obj.connectivity = message.connectivity ? Connectivity.toJSON(message.connectivity) : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + obj.variables = {}; + if (message.variables) { + Object.entries(message.variables).forEach(([k, v]) => { + obj.variables[k] = VariableInput.toJSON(v); + }); + } + message.canary !== undefined && + (obj.canary = message.canary ? Canary.toJSON(message.canary) : undefined); return obj; }, @@ -1027,6 +1276,22 @@ export const UpdateApiGatewayRequest = { object.connectivity !== undefined && object.connectivity !== null ? 
Connectivity.fromPartial(object.connectivity) : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.variables = Object.entries(object.variables ?? {}).reduce<{ + [key: string]: VariableInput; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = VariableInput.fromPartial(value); + } + return acc; + }, {}); + message.canary = + object.canary !== undefined && object.canary !== null + ? Canary.fromPartial(object.canary) + : undefined; return message; }, }; @@ -1120,6 +1385,98 @@ messageTypeRegistry.set( UpdateApiGatewayRequest_LabelsEntry ); +const baseUpdateApiGatewayRequest_VariablesEntry: object = { + $type: + "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest.VariablesEntry", + key: "", +}; + +export const UpdateApiGatewayRequest_VariablesEntry = { + $type: + "yandex.cloud.serverless.apigateway.v1.UpdateApiGatewayRequest.VariablesEntry" as const, + + encode( + message: UpdateApiGatewayRequest_VariablesEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== undefined) { + VariableInput.encode(message.value, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateApiGatewayRequest_VariablesEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateApiGatewayRequest_VariablesEntry, + } as UpdateApiGatewayRequest_VariablesEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = VariableInput.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateApiGatewayRequest_VariablesEntry { + const message = { + ...baseUpdateApiGatewayRequest_VariablesEntry, + } as UpdateApiGatewayRequest_VariablesEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? VariableInput.fromJSON(object.value) + : undefined; + return message; + }, + + toJSON(message: UpdateApiGatewayRequest_VariablesEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && + (obj.value = message.value + ? VariableInput.toJSON(message.value) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateApiGatewayRequest_VariablesEntry { + const message = { + ...baseUpdateApiGatewayRequest_VariablesEntry, + } as UpdateApiGatewayRequest_VariablesEntry; + message.key = object.key ?? ""; + message.value = + object.value !== undefined && object.value !== null + ? 
VariableInput.fromPartial(object.value) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateApiGatewayRequest_VariablesEntry.$type, + UpdateApiGatewayRequest_VariablesEntry +); + const baseDeleteApiGatewayRequest: object = { $type: "yandex.cloud.serverless.apigateway.v1.DeleteApiGatewayRequest", apiGatewayId: "", @@ -1196,7 +1553,6 @@ messageTypeRegistry.set(DeleteApiGatewayRequest.$type, DeleteApiGatewayRequest); const baseAddDomainRequest: object = { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainRequest", apiGatewayId: "", - domainId: "", domainName: "", certificateId: "", }; @@ -1211,9 +1567,6 @@ export const AddDomainRequest = { if (message.apiGatewayId !== "") { writer.uint32(10).string(message.apiGatewayId); } - if (message.domainId !== "") { - writer.uint32(18).string(message.domainId); - } if (message.domainName !== "") { writer.uint32(26).string(message.domainName); } @@ -1233,9 +1586,6 @@ export const AddDomainRequest = { case 1: message.apiGatewayId = reader.string(); break; - case 2: - message.domainId = reader.string(); - break; case 3: message.domainName = reader.string(); break; @@ -1256,10 +1606,6 @@ export const AddDomainRequest = { object.apiGatewayId !== undefined && object.apiGatewayId !== null ? String(object.apiGatewayId) : ""; - message.domainId = - object.domainId !== undefined && object.domainId !== null - ? String(object.domainId) - : ""; message.domainName = object.domainName !== undefined && object.domainName !== null ? 
String(object.domainName) @@ -1275,7 +1621,6 @@ export const AddDomainRequest = { const obj: any = {}; message.apiGatewayId !== undefined && (obj.apiGatewayId = message.apiGatewayId); - message.domainId !== undefined && (obj.domainId = message.domainId); message.domainName !== undefined && (obj.domainName = message.domainName); message.certificateId !== undefined && (obj.certificateId = message.certificateId); @@ -1287,7 +1632,6 @@ export const AddDomainRequest = { ): AddDomainRequest { const message = { ...baseAddDomainRequest } as AddDomainRequest; message.apiGatewayId = object.apiGatewayId ?? ""; - message.domainId = object.domainId ?? ""; message.domainName = object.domainName ?? ""; message.certificateId = object.certificateId ?? ""; return message; @@ -1605,6 +1949,7 @@ const baseAddDomainMetadata: object = { apiGatewayId: "", domainId: "", domainName: "", + certificateId: "", }; export const AddDomainMetadata = { @@ -1623,6 +1968,9 @@ export const AddDomainMetadata = { if (message.domainName !== "") { writer.uint32(26).string(message.domainName); } + if (message.certificateId !== "") { + writer.uint32(34).string(message.certificateId); + } return writer; }, @@ -1642,6 +1990,9 @@ export const AddDomainMetadata = { case 3: message.domainName = reader.string(); break; + case 4: + message.certificateId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1664,6 +2015,10 @@ export const AddDomainMetadata = { object.domainName !== undefined && object.domainName !== null ? String(object.domainName) : ""; + message.certificateId = + object.certificateId !== undefined && object.certificateId !== null + ? 
String(object.certificateId) + : ""; return message; }, @@ -1673,6 +2028,8 @@ export const AddDomainMetadata = { (obj.apiGatewayId = message.apiGatewayId); message.domainId !== undefined && (obj.domainId = message.domainId); message.domainName !== undefined && (obj.domainName = message.domainName); + message.certificateId !== undefined && + (obj.certificateId = message.certificateId); return obj; }, @@ -1683,6 +2040,7 @@ export const AddDomainMetadata = { message.apiGatewayId = object.apiGatewayId ?? ""; message.domainId = object.domainId ?? ""; message.domainName = object.domainName ?? ""; + message.certificateId = object.certificateId ?? ""; return message; }, }; @@ -2125,7 +2483,7 @@ messageTypeRegistry.set(GetOpenapiSpecResponse.$type, GetOpenapiSpecResponse); export const ApiGatewayServiceService = { /** * Returns the specified API gateway. Note that only API gateway basic attributes are returned. - * To get associated openapi specification, make a [GetOpenapiSpec] request. + * To get associated openapi specification, make a [GetOpenapiSpec](#GetOpenapiSpec) request. * * To get the list of all available API gateways, make a [List] request. */ @@ -2287,7 +2645,7 @@ export const ApiGatewayServiceService = { export interface ApiGatewayServiceServer extends UntypedServiceImplementation { /** * Returns the specified API gateway. Note that only API gateway basic attributes are returned. - * To get associated openapi specification, make a [GetOpenapiSpec] request. + * To get associated openapi specification, make a [GetOpenapiSpec](#GetOpenapiSpec) request. * * To get the list of all available API gateways, make a [List] request. */ @@ -2328,7 +2686,7 @@ export interface ApiGatewayServiceServer extends UntypedServiceImplementation { export interface ApiGatewayServiceClient extends Client { /** * Returns the specified API gateway. Note that only API gateway basic attributes are returned. - * To get associated openapi specification, make a [GetOpenapiSpec] request. 
+ * To get associated openapi specification, make a [GetOpenapiSpec](#GetOpenapiSpec) request. * * To get the list of all available API gateways, make a [List] request. */ diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container.ts b/src/generated/yandex/cloud/serverless/containers/v1/container.ts index 95e2d564..66daa67d 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container.ts @@ -3,27 +3,44 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; import { Duration } from "../../../../../google/protobuf/duration"; +import { + LogLevel_Level, + logLevel_LevelFromJSON, + logLevel_LevelToJSON, +} from "../../../../../yandex/cloud/logging/v1/log_entry"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.serverless.containers.v1"; export interface Container { $type: "yandex.cloud.serverless.containers.v1.Container"; + /** ID of the container. Generated at creation time. */ id: string; + /** ID of the folder that the container belongs to. */ folderId: string; + /** Creation timestamp for the container. */ createdAt?: Date; + /** Name of the container. The name is unique within the folder. */ name: string; + /** Description of the container. */ description: string; + /** Container labels as `key:value` pairs. */ labels: { [key: string]: string }; + /** URL that needs to be requested to call the container. */ url: string; + /** Status of the container. */ status: Container_Status; } export enum Container_Status { STATUS_UNSPECIFIED = 0, + /** CREATING - Container is being created. */ CREATING = 1, + /** ACTIVE - Container is ready for use. */ ACTIVE = 2, + /** DELETING - Container is being deleted. */ DELETING = 3, + /** ERROR - Container failed. The only allowed action is delete. 
*/ ERROR = 4, UNRECOGNIZED = -1, } @@ -77,25 +94,55 @@ export interface Container_LabelsEntry { export interface Revision { $type: "yandex.cloud.serverless.containers.v1.Revision"; + /** ID of the revision. */ id: string; + /** ID of the container that the revision belongs to. */ containerId: string; + /** Description of the revision. */ description: string; + /** Creation timestamp for the revision. */ createdAt?: Date; + /** Image configuration for the revision. */ image?: Image; + /** Resources allocated to the revision. */ resources?: Resources; + /** + * Timeout for the execution of the revision. + * + * If the timeout is exceeded, Serverless Containers responds with a 504 HTTP code. + */ executionTimeout?: Duration; + /** The number of concurrent requests allowed per container instance. */ concurrency: number; + /** ID of the service account associated with the revision. */ serviceAccountId: string; + /** Status of the revision. */ status: Revision_Status; + /** Yandex Lockbox secrets to be used by the revision. */ secrets: Secret[]; + /** Network access. If specified the revision will be attached to specified network/subnet(s). */ connectivity?: Connectivity; + /** + * Policy for provisioning instances of the revision. + * + * The policy is only applied when the revision is ACTIVE. + */ provisionPolicy?: ProvisionPolicy; + /** Policy for scaling instances of the revision. */ + scalingPolicy?: ScalingPolicy; + /** Options for logging from the container. */ + logOptions?: LogOptions; + /** S3 mounts to be used by the version. */ + storageMounts: StorageMount[]; } export enum Revision_Status { STATUS_UNSPECIFIED = 0, + /** CREATING - Revision is being created. */ CREATING = 1, + /** ACTIVE - Revision is currently used by the container. */ ACTIVE = 2, + /** OBSOLETE - Revision is not used by the container. May be deleted later. 
*/ OBSOLETE = 3, UNRECOGNIZED = -1, } @@ -136,13 +183,20 @@ export function revision_StatusToJSON(object: Revision_Status): string { } } +/** Revision image specification. */ export interface Image { $type: "yandex.cloud.serverless.containers.v1.Image"; + /** Image URL, that is used by the revision. */ imageUrl: string; + /** Digest of the image. Calculated at creation time. */ imageDigest: string; + /** Override for the image's ENTRYPOINT. */ command?: Command; + /** Override for the image's CMD. */ args?: Args; + /** Additional environment for the container. */ environment: { [key: string]: string }; + /** Override for the image's WORKDIR. */ workingDir: string; } @@ -154,40 +208,119 @@ export interface Image_EnvironmentEntry { export interface Command { $type: "yandex.cloud.serverless.containers.v1.Command"; + /** + * Command that will override ENTRYPOINT of an image. + * + * Commands will be executed as is. The runtime will not substitute environment + * variables or execute shell commands. If one wants to do that, they should + * invoke shell interpreter with an appropriate shell script. + */ command: string[]; } export interface Args { $type: "yandex.cloud.serverless.containers.v1.Args"; + /** + * Arguments that will override CMD of an image. + * + * Arguments will be passed as is. The runtime will not substitute environment + * variables or execute shell commands. If one wants to do that, they should + * invoke shell interpreter with an appropriate shell script. + */ args: string[]; } +/** Resources allocated to a revision. */ export interface Resources { $type: "yandex.cloud.serverless.containers.v1.Resources"; + /** Amount of memory available to the revision, specified in bytes, multiple of 128MB. */ memory: number; + /** Number of cores available to the revision. */ cores: number; + /** + * Specifies baseline performance for a core in percent, multiple of 5%. + * Should be 100% for cores > 1. 
+ */ coreFraction: number; } export interface ProvisionPolicy { $type: "yandex.cloud.serverless.containers.v1.ProvisionPolicy"; + /** + * Minimum number of guaranteed provisioned container instances for all zones + * in total. + */ minInstances: number; } +/** Secret that is available to the container at run time. */ export interface Secret { $type: "yandex.cloud.serverless.containers.v1.Secret"; + /** ID of Yandex Lockbox secret. */ id: string; + /** ID of Yandex Lockbox secret. */ versionId: string; + /** Key in secret's payload, which value to be delivered into container environment. */ key: string; + /** Environment variable in which secret's value is delivered. */ environmentVariable: string | undefined; } +/** Revision connectivity specification. */ export interface Connectivity { $type: "yandex.cloud.serverless.containers.v1.Connectivity"; + /** Network the revision will have access to. */ networkId: string; + /** + * The list of subnets (from the same network) the revision can be attached to. + * + * Deprecated, it is sufficient to specify only network_id, without the list of subnet_ids. + */ subnetIds: string[]; } +export interface LogOptions { + $type: "yandex.cloud.serverless.containers.v1.LogOptions"; + /** Is logging from container disabled. */ + disabled: boolean; + /** Entry should be written to log group resolved by ID. */ + logGroupId: string | undefined; + /** Entry should be written to default log group for specified folder. */ + folderId: string | undefined; + /** + * Minimum log entry level. + * + * See [LogLevel.Level] for details. + */ + minLevel: LogLevel_Level; +} + +export interface ScalingPolicy { + $type: "yandex.cloud.serverless.containers.v1.ScalingPolicy"; + /** + * Upper limit for instance count in each zone. + * 0 means no limit. + */ + zoneInstancesLimit: number; + /** + * Upper limit of requests count in each zone. + * 0 means no limit. 
+ */ + zoneRequestsLimit: number; +} + +export interface StorageMount { + $type: "yandex.cloud.serverless.containers.v1.StorageMount"; + /** S3 bucket name for mounting. */ + bucketId: string; + /** S3 bucket prefix for mounting. */ + prefix: string; + /** Is mount read only. */ + readOnly: boolean; + /** Mount point path inside the container for mounting. */ + mountPointPath: string; +} + const baseContainer: object = { $type: "yandex.cloud.serverless.containers.v1.Container", id: "", @@ -512,6 +645,18 @@ export const Revision = { writer.uint32(106).fork() ).ldelim(); } + if (message.scalingPolicy !== undefined) { + ScalingPolicy.encode( + message.scalingPolicy, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(122).fork()).ldelim(); + } + for (const v of message.storageMounts) { + StorageMount.encode(v!, writer.uint32(130).fork()).ldelim(); + } return writer; }, @@ -520,6 +665,7 @@ export const Revision = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseRevision } as Revision; message.secrets = []; + message.storageMounts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -567,6 +713,17 @@ export const Revision = { reader.uint32() ); break; + case 14: + message.scalingPolicy = ScalingPolicy.decode(reader, reader.uint32()); + break; + case 15: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 16: + message.storageMounts.push( + StorageMount.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -626,6 +783,17 @@ export const Revision = { object.provisionPolicy !== undefined && object.provisionPolicy !== null ? ProvisionPolicy.fromJSON(object.provisionPolicy) : undefined; + message.scalingPolicy = + object.scalingPolicy !== undefined && object.scalingPolicy !== null + ? 
ScalingPolicy.fromJSON(object.scalingPolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.storageMounts = (object.storageMounts ?? []).map((e: any) => + StorageMount.fromJSON(e) + ); return message; }, @@ -669,6 +837,21 @@ export const Revision = { (obj.provisionPolicy = message.provisionPolicy ? ProvisionPolicy.toJSON(message.provisionPolicy) : undefined); + message.scalingPolicy !== undefined && + (obj.scalingPolicy = message.scalingPolicy + ? ScalingPolicy.toJSON(message.scalingPolicy) + : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + if (message.storageMounts) { + obj.storageMounts = message.storageMounts.map((e) => + e ? StorageMount.toJSON(e) : undefined + ); + } else { + obj.storageMounts = []; + } return obj; }, @@ -702,6 +885,16 @@ export const Revision = { object.provisionPolicy !== undefined && object.provisionPolicy !== null ? ProvisionPolicy.fromPartial(object.provisionPolicy) : undefined; + message.scalingPolicy = + object.scalingPolicy !== undefined && object.scalingPolicy !== null + ? ScalingPolicy.fromPartial(object.scalingPolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? 
LogOptions.fromPartial(object.logOptions) + : undefined; + message.storageMounts = + object.storageMounts?.map((e) => StorageMount.fromPartial(e)) || []; return message; }, }; @@ -1386,6 +1579,287 @@ export const Connectivity = { messageTypeRegistry.set(Connectivity.$type, Connectivity); +const baseLogOptions: object = { + $type: "yandex.cloud.serverless.containers.v1.LogOptions", + disabled: false, + minLevel: 0, +}; + +export const LogOptions = { + $type: "yandex.cloud.serverless.containers.v1.LogOptions" as const, + + encode( + message: LogOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.disabled === true) { + writer.uint32(8).bool(message.disabled); + } + if (message.logGroupId !== undefined) { + writer.uint32(18).string(message.logGroupId); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.minLevel !== 0) { + writer.uint32(32).int32(message.minLevel); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLogOptions } as LogOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.disabled = reader.bool(); + break; + case 2: + message.logGroupId = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.minLevel = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + message.logGroupId = + object.logGroupId !== undefined && object.logGroupId !== null + ? 
String(object.logGroupId) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.minLevel = + object.minLevel !== undefined && object.minLevel !== null + ? logLevel_LevelFromJSON(object.minLevel) + : 0; + return message; + }, + + toJSON(message: LogOptions): unknown { + const obj: any = {}; + message.disabled !== undefined && (obj.disabled = message.disabled); + message.logGroupId !== undefined && (obj.logGroupId = message.logGroupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.minLevel !== undefined && + (obj.minLevel = logLevel_LevelToJSON(message.minLevel)); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = object.disabled ?? false; + message.logGroupId = object.logGroupId ?? undefined; + message.folderId = object.folderId ?? undefined; + message.minLevel = object.minLevel ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(LogOptions.$type, LogOptions); + +const baseScalingPolicy: object = { + $type: "yandex.cloud.serverless.containers.v1.ScalingPolicy", + zoneInstancesLimit: 0, + zoneRequestsLimit: 0, +}; + +export const ScalingPolicy = { + $type: "yandex.cloud.serverless.containers.v1.ScalingPolicy" as const, + + encode( + message: ScalingPolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.zoneInstancesLimit !== 0) { + writer.uint32(8).int64(message.zoneInstancesLimit); + } + if (message.zoneRequestsLimit !== 0) { + writer.uint32(16).int64(message.zoneRequestsLimit); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ScalingPolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseScalingPolicy } as ScalingPolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.zoneInstancesLimit = longToNumber(reader.int64() as Long); + break; + case 2: + message.zoneRequestsLimit = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ScalingPolicy { + const message = { ...baseScalingPolicy } as ScalingPolicy; + message.zoneInstancesLimit = + object.zoneInstancesLimit !== undefined && + object.zoneInstancesLimit !== null + ? Number(object.zoneInstancesLimit) + : 0; + message.zoneRequestsLimit = + object.zoneRequestsLimit !== undefined && + object.zoneRequestsLimit !== null + ? Number(object.zoneRequestsLimit) + : 0; + return message; + }, + + toJSON(message: ScalingPolicy): unknown { + const obj: any = {}; + message.zoneInstancesLimit !== undefined && + (obj.zoneInstancesLimit = Math.round(message.zoneInstancesLimit)); + message.zoneRequestsLimit !== undefined && + (obj.zoneRequestsLimit = Math.round(message.zoneRequestsLimit)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ScalingPolicy { + const message = { ...baseScalingPolicy } as ScalingPolicy; + message.zoneInstancesLimit = object.zoneInstancesLimit ?? 0; + message.zoneRequestsLimit = object.zoneRequestsLimit ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ScalingPolicy.$type, ScalingPolicy); + +const baseStorageMount: object = { + $type: "yandex.cloud.serverless.containers.v1.StorageMount", + bucketId: "", + prefix: "", + readOnly: false, + mountPointPath: "", +}; + +export const StorageMount = { + $type: "yandex.cloud.serverless.containers.v1.StorageMount" as const, + + encode( + message: StorageMount, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketId !== "") { + writer.uint32(10).string(message.bucketId); + } + if (message.prefix !== "") { + writer.uint32(18).string(message.prefix); + } + if (message.readOnly === true) { + writer.uint32(32).bool(message.readOnly); + } + if (message.mountPointPath !== "") { + writer.uint32(42).string(message.mountPointPath); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StorageMount { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStorageMount } as StorageMount; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketId = reader.string(); + break; + case 2: + message.prefix = reader.string(); + break; + case 4: + message.readOnly = reader.bool(); + break; + case 5: + message.mountPointPath = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StorageMount { + const message = { ...baseStorageMount } as StorageMount; + message.bucketId = + object.bucketId !== undefined && object.bucketId !== null + ? String(object.bucketId) + : ""; + message.prefix = + object.prefix !== undefined && object.prefix !== null + ? String(object.prefix) + : ""; + message.readOnly = + object.readOnly !== undefined && object.readOnly !== null + ? 
Boolean(object.readOnly) + : false; + message.mountPointPath = + object.mountPointPath !== undefined && object.mountPointPath !== null + ? String(object.mountPointPath) + : ""; + return message; + }, + + toJSON(message: StorageMount): unknown { + const obj: any = {}; + message.bucketId !== undefined && (obj.bucketId = message.bucketId); + message.prefix !== undefined && (obj.prefix = message.prefix); + message.readOnly !== undefined && (obj.readOnly = message.readOnly); + message.mountPointPath !== undefined && + (obj.mountPointPath = message.mountPointPath); + return obj; + }, + + fromPartial, I>>( + object: I + ): StorageMount { + const message = { ...baseStorageMount } as StorageMount; + message.bucketId = object.bucketId ?? ""; + message.prefix = object.prefix ?? ""; + message.readOnly = object.readOnly ?? false; + message.mountPointPath = object.mountPointPath ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(StorageMount.$type, StorageMount); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts index 93308962..728d858c 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts @@ -19,11 +19,14 @@ import { Resources, Connectivity, ProvisionPolicy, + ScalingPolicy, + LogOptions, Command, Args, Container, Revision, Secret, + StorageMount, } from "../../../../../yandex/cloud/serverless/containers/v1/container"; import { Duration } from "../../../../../google/protobuf/duration"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -38,28 +41,77 @@ export const protobufPackage = "yandex.cloud.serverless.containers.v1"; export interface GetContainerRequest { $type: 
"yandex.cloud.serverless.containers.v1.GetContainerRequest"; + /** + * ID of the container to return. + * + * To get a container ID make a [ContainerService.List] request. + */ containerId: string; } export interface ListContainersRequest { $type: "yandex.cloud.serverless.containers.v1.ListContainersRequest"; + /** + * ID of the folder to list containers in. + * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than `pageSize`, the service returns a [ListContainersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ pageSize: number; + /** + * Page token. To get the next page of results, set `pageToken` to the + * [ListContainersResponse.next_page_token] returned by a previous list request. + */ pageToken: string; + /** + * A filter expression that filters containers listed in the response. + * + * The expression must specify: + * 1. The field name. Currently filtering can only be applied to the [Container.name] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `name="my-container"`. + */ filter: string; } export interface ListContainersResponse { $type: "yandex.cloud.serverless.containers.v1.ListContainersResponse"; + /** List of containers in the specified folder. */ containers: Container[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListContainersRequest.page_size], use `nextPageToken` as the value + * for the [ListContainersRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `nextPageToken` to continue paging through the results. 
+ */ nextPageToken: string; } export interface CreateContainerRequest { $type: "yandex.cloud.serverless.containers.v1.CreateContainerRequest"; + /** + * ID of the folder to create a container in. + * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; + /** + * Name of the container. + * The name must be unique within the folder. + */ name: string; + /** Description of the container. */ description: string; + /** Resource labels as `key:value` pairs. */ labels: { [key: string]: string }; } @@ -71,15 +123,33 @@ export interface CreateContainerRequest_LabelsEntry { export interface CreateContainerMetadata { $type: "yandex.cloud.serverless.containers.v1.CreateContainerMetadata"; + /** ID of the container that is being created. */ containerId: string; } export interface UpdateContainerRequest { $type: "yandex.cloud.serverless.containers.v1.UpdateContainerRequest"; + /** + * ID of the container to update. + * + * To get a container ID make a [ContainerService.List] request. + */ containerId: string; + /** Field mask that specifies which attributes of the container should be updated. */ updateMask?: FieldMask; + /** + * New name for the container. + * The name must be unique within the folder. + */ name: string; + /** New description for the container. */ description: string; + /** + * Container labels as `key:value` pairs. + * + * Existing set of labels is completely replaced by the provided set, so if you just want + * to add or remove a label, request the current set of labels with a [ContainerService.Get] request. + */ labels: { [key: string]: string }; } @@ -91,59 +161,144 @@ export interface UpdateContainerRequest_LabelsEntry { export interface UpdateContainerMetadata { $type: "yandex.cloud.serverless.containers.v1.UpdateContainerMetadata"; + /** ID of the container that is being updated. 
*/ containerId: string; } export interface DeleteContainerRequest { $type: "yandex.cloud.serverless.containers.v1.DeleteContainerRequest"; + /** + * ID of the container to delete. + * To get a container ID make a [ContainerService.List] request. + */ containerId: string; } export interface DeleteContainerMetadata { $type: "yandex.cloud.serverless.containers.v1.DeleteContainerMetadata"; + /** ID of the container that is being deleted. */ containerId: string; } export interface GetContainerRevisionRequest { $type: "yandex.cloud.serverless.containers.v1.GetContainerRevisionRequest"; + /** + * ID of the revision to return. + * + * To get a revision ID make a [ContainerService.ListRevisions] request. + */ containerRevisionId: string; } export interface ListContainersRevisionsRequest { $type: "yandex.cloud.serverless.containers.v1.ListContainersRevisionsRequest"; + /** + * ID of the folder to list container revisions for. + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string | undefined; + /** + * ID of the container to list revisions for. + * To get a container ID use a [ContainerService.List] request. + */ containerId: string | undefined; + /** + * The maximum number of results per page to return. If the number of available results + * is larger than `pageSize`, the service returns a [ListContainersRevisionsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ pageSize: number; + /** + * Page token. To get the next page of results, set `pageToken` to the + * [ListContainersRevisionsResponse.next_page_token] returned by a previous list request. + */ pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * 1. The field name. Currently filtering can only be applied to the [Revision.status] and [Revision.runtime] fields. + * 2. An `=` operator. 
+ * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `status="ACTIVE"`. + */ filter: string; } export interface ListContainersRevisionsResponse { $type: "yandex.cloud.serverless.containers.v1.ListContainersRevisionsResponse"; + /** List of revisions for the specified folder or container. */ revisions: Revision[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListContainersRevisionsRequest.page_size], use `nextPageToken` as the value + * for the [ListContainersRevisionsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `nextPageToken` to continue paging through the results. + */ nextPageToken: string; } export interface DeployContainerRevisionRequest { $type: "yandex.cloud.serverless.containers.v1.DeployContainerRevisionRequest"; + /** + * ID of the container to create a revision for. + * + * To get a container ID, make a [ContainerService.List] request. + */ containerId: string; + /** Description of the revision. */ description: string; + /** Resources allocated to the revision. */ resources?: Resources; + /** + * Timeout for the execution of the revision. + * + * If the timeout is exceeded, Serverless Containers responds with a 504 HTTP code. + */ executionTimeout?: Duration; + /** ID of the service account to associate with the revision. */ serviceAccountId: string; + /** Image configuration for the revision. */ imageSpec?: ImageSpec; + /** + * The number of concurrent requests allowed per container instance. + * + * The default value is 1. + */ concurrency: number; + /** Yandex Lockbox secrets to be used by the revision. */ secrets: Secret[]; + /** Network access. If specified the revision will be attached to specified network/subnet(s). */ connectivity?: Connectivity; + /** + * Policy for provisioning instances of the revision. 
+ * + * The policy is only applied when the revision is ACTIVE. + */ provisionPolicy?: ProvisionPolicy; + /** Policy for scaling instances of the revision. */ + scalingPolicy?: ScalingPolicy; + /** Options for logging from the container. */ + logOptions?: LogOptions; + /** S3 mounts to be used by the version. */ + storageMounts: StorageMount[]; } +/** Revision image specification. */ export interface ImageSpec { $type: "yandex.cloud.serverless.containers.v1.ImageSpec"; + /** Image URL, that is used by the revision. */ imageUrl: string; + /** Override for the image's ENTRYPOINT. */ command?: Command; + /** Override for the image's CMD. */ args?: Args; + /** Additional environment for the container. */ environment: { [key: string]: string }; + /** Override for the image's WORKDIR. */ workingDir: string; } @@ -155,32 +310,74 @@ export interface ImageSpec_EnvironmentEntry { export interface DeployContainerRevisionMetadata { $type: "yandex.cloud.serverless.containers.v1.DeployContainerRevisionMetadata"; + /** ID of the revision that is being created. */ containerRevisionId: string; } export interface RollbackContainerRequest { $type: "yandex.cloud.serverless.containers.v1.RollbackContainerRequest"; + /** + * ID of the container to rollback to an old revision. + * + * To get a container ID, make a [ContainerService.List] request. + */ containerId: string; + /** + * ID of the revision to rollback to. + * + * To get a revision ID make a [ContainerService.ListRevisions] request. + */ revisionId: string; } export interface RollbackContainerMetadata { $type: "yandex.cloud.serverless.containers.v1.RollbackContainerMetadata"; + /** ID of the container that is being rolled back. */ containerId: string; + /** ID of the revision that the container is being rolled back to. */ revisionId: string; } export interface ListContainerOperationsRequest { $type: "yandex.cloud.serverless.containers.v1.ListContainerOperationsRequest"; + /** ID of the container to list operations for. 
*/ containerId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `pageSize`, the service returns a [ListContainerOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * + * Default value: 100. + */ pageSize: number; + /** + * Page token. To get the next page of results, set `pageToken` to the + * [ListContainerOperationsResponse.next_page_token] returned by a previous list request. + */ pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * + * The expression must specify: + * 1. The field name. Currently filtering can be applied to the [operation.Operation.done], [operation.Operation.created_by] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Examples of a filter: `done=false`, `created_by='John.Doe'`. + */ filter: string; } export interface ListContainerOperationsResponse { $type: "yandex.cloud.serverless.containers.v1.ListContainerOperationsResponse"; + /** List of operations for the specified container. */ operations: Operation[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListContainerOperationsRequest.page_size], use `nextPageToken` as the value + * for the [ListContainerOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `nextPageToken` to continue paging through the results. 
+ */ nextPageToken: string; } @@ -1540,6 +1737,18 @@ export const DeployContainerRevisionRequest = { writer.uint32(98).fork() ).ldelim(); } + if (message.scalingPolicy !== undefined) { + ScalingPolicy.encode( + message.scalingPolicy, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(114).fork()).ldelim(); + } + for (const v of message.storageMounts) { + StorageMount.encode(v!, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -1553,6 +1762,7 @@ export const DeployContainerRevisionRequest = { ...baseDeployContainerRevisionRequest, } as DeployContainerRevisionRequest; message.secrets = []; + message.storageMounts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1589,6 +1799,17 @@ export const DeployContainerRevisionRequest = { reader.uint32() ); break; + case 13: + message.scalingPolicy = ScalingPolicy.decode(reader, reader.uint32()); + break; + case 14: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 15: + message.storageMounts.push( + StorageMount.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -1640,6 +1861,17 @@ export const DeployContainerRevisionRequest = { object.provisionPolicy !== undefined && object.provisionPolicy !== null ? ProvisionPolicy.fromJSON(object.provisionPolicy) : undefined; + message.scalingPolicy = + object.scalingPolicy !== undefined && object.scalingPolicy !== null + ? ScalingPolicy.fromJSON(object.scalingPolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.storageMounts = (object.storageMounts ?? []).map((e: any) => + StorageMount.fromJSON(e) + ); return message; }, @@ -1680,6 +1912,21 @@ export const DeployContainerRevisionRequest = { (obj.provisionPolicy = message.provisionPolicy ? 
ProvisionPolicy.toJSON(message.provisionPolicy) : undefined); + message.scalingPolicy !== undefined && + (obj.scalingPolicy = message.scalingPolicy + ? ScalingPolicy.toJSON(message.scalingPolicy) + : undefined); + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + if (message.storageMounts) { + obj.storageMounts = message.storageMounts.map((e) => + e ? StorageMount.toJSON(e) : undefined + ); + } else { + obj.storageMounts = []; + } return obj; }, @@ -1714,6 +1961,16 @@ export const DeployContainerRevisionRequest = { object.provisionPolicy !== undefined && object.provisionPolicy !== null ? ProvisionPolicy.fromPartial(object.provisionPolicy) : undefined; + message.scalingPolicy = + object.scalingPolicy !== undefined && object.scalingPolicy !== null + ? ScalingPolicy.fromPartial(object.scalingPolicy) + : undefined; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.storageMounts = + object.storageMounts?.map((e) => StorageMount.fromPartial(e)) || []; return message; }, }; @@ -2427,7 +2684,13 @@ messageTypeRegistry.set( ListContainerOperationsResponse ); +/** A set of methods for managing serverless containers. */ export const ContainerServiceService = { + /** + * Returns the specified container. + * + * To get the list of all available containers, make a [List] request. + */ get: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/Get", requestStream: false, @@ -2439,6 +2702,7 @@ export const ContainerServiceService = { Buffer.from(Container.encode(value).finish()), responseDeserialize: (value: Buffer) => Container.decode(value), }, + /** Retrieves the list of containers in the specified folder. 
*/ list: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/List", requestStream: false, @@ -2451,6 +2715,7 @@ export const ContainerServiceService = { responseDeserialize: (value: Buffer) => ListContainersResponse.decode(value), }, + /** Creates a container in the specified folder. */ create: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/Create", requestStream: false, @@ -2462,6 +2727,7 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates the specified container. */ update: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/Update", requestStream: false, @@ -2473,6 +2739,7 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Deletes the specified container. */ delete: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/Delete", requestStream: false, @@ -2484,6 +2751,7 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Deploys a revision for the specified container. */ deployRevision: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/DeployRevision", requestStream: false, @@ -2496,6 +2764,7 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Rollback the specified container to an old revision. */ rollback: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/Rollback", requestStream: false, @@ -2508,6 +2777,11 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** + * Returns the specified revision of a container. 
+ * + * To get the list of available revision, make a [ListRevisions] request. + */ getRevision: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/GetRevision", requestStream: false, @@ -2520,6 +2794,10 @@ export const ContainerServiceService = { Buffer.from(Revision.encode(value).finish()), responseDeserialize: (value: Buffer) => Revision.decode(value), }, + /** + * Retrieves the list of revisions for the specified container, or of all container revisions + * in the specified folder. + */ listRevisions: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/ListRevisions", requestStream: false, @@ -2533,6 +2811,7 @@ export const ContainerServiceService = { responseDeserialize: (value: Buffer) => ListContainersRevisionsResponse.decode(value), }, + /** Lists operations for the specified container. */ listOperations: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/ListOperations", requestStream: false, @@ -2546,6 +2825,7 @@ export const ContainerServiceService = { responseDeserialize: (value: Buffer) => ListContainerOperationsResponse.decode(value), }, + /** Lists existing access bindings for the specified container. */ listAccessBindings: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/ListAccessBindings", requestStream: false, @@ -2559,6 +2839,7 @@ export const ContainerServiceService = { responseDeserialize: (value: Buffer) => ListAccessBindingsResponse.decode(value), }, + /** Sets access bindings for the container. */ setAccessBindings: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/SetAccessBindings", requestStream: false, @@ -2571,6 +2852,7 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates access bindings for the specified container. 
*/ updateAccessBindings: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/UpdateAccessBindings", requestStream: false, @@ -2586,31 +2868,60 @@ export const ContainerServiceService = { } as const; export interface ContainerServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified container. + * + * To get the list of all available containers, make a [List] request. + */ get: handleUnaryCall; + /** Retrieves the list of containers in the specified folder. */ list: handleUnaryCall; + /** Creates a container in the specified folder. */ create: handleUnaryCall; + /** Updates the specified container. */ update: handleUnaryCall; + /** Deletes the specified container. */ delete: handleUnaryCall; + /** Deploys a revision for the specified container. */ deployRevision: handleUnaryCall; + /** Rollback the specified container to an old revision. */ rollback: handleUnaryCall; + /** + * Returns the specified revision of a container. + * + * To get the list of available revision, make a [ListRevisions] request. + */ getRevision: handleUnaryCall; + /** + * Retrieves the list of revisions for the specified container, or of all container revisions + * in the specified folder. + */ listRevisions: handleUnaryCall< ListContainersRevisionsRequest, ListContainersRevisionsResponse >; + /** Lists operations for the specified container. */ listOperations: handleUnaryCall< ListContainerOperationsRequest, ListContainerOperationsResponse >; + /** Lists existing access bindings for the specified container. */ listAccessBindings: handleUnaryCall< ListAccessBindingsRequest, ListAccessBindingsResponse >; + /** Sets access bindings for the container. */ setAccessBindings: handleUnaryCall; + /** Updates access bindings for the specified container. */ updateAccessBindings: handleUnaryCall; } export interface ContainerServiceClient extends Client { + /** + * Returns the specified container. 
+ * + * To get the list of all available containers, make a [List] request. + */ get( request: GetContainerRequest, callback: (error: ServiceError | null, response: Container) => void @@ -2626,6 +2937,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Container) => void ): ClientUnaryCall; + /** Retrieves the list of containers in the specified folder. */ list( request: ListContainersRequest, callback: ( @@ -2650,6 +2962,7 @@ export interface ContainerServiceClient extends Client { response: ListContainersResponse ) => void ): ClientUnaryCall; + /** Creates a container in the specified folder. */ create( request: CreateContainerRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2665,6 +2978,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates the specified container. */ update( request: UpdateContainerRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2680,6 +2994,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Deletes the specified container. */ delete( request: DeleteContainerRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2695,6 +3010,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Deploys a revision for the specified container. 
*/ deployRevision( request: DeployContainerRevisionRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2710,6 +3026,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Rollback the specified container to an old revision. */ rollback( request: RollbackContainerRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2725,6 +3042,11 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** + * Returns the specified revision of a container. + * + * To get the list of available revision, make a [ListRevisions] request. + */ getRevision( request: GetContainerRevisionRequest, callback: (error: ServiceError | null, response: Revision) => void @@ -2740,6 +3062,10 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Revision) => void ): ClientUnaryCall; + /** + * Retrieves the list of revisions for the specified container, or of all container revisions + * in the specified folder. + */ listRevisions( request: ListContainersRevisionsRequest, callback: ( @@ -2764,6 +3090,7 @@ export interface ContainerServiceClient extends Client { response: ListContainersRevisionsResponse ) => void ): ClientUnaryCall; + /** Lists operations for the specified container. */ listOperations( request: ListContainerOperationsRequest, callback: ( @@ -2788,6 +3115,7 @@ export interface ContainerServiceClient extends Client { response: ListContainerOperationsResponse ) => void ): ClientUnaryCall; + /** Lists existing access bindings for the specified container. 
*/ listAccessBindings( request: ListAccessBindingsRequest, callback: ( @@ -2812,6 +3140,7 @@ export interface ContainerServiceClient extends Client { response: ListAccessBindingsResponse ) => void ): ClientUnaryCall; + /** Sets access bindings for the container. */ setAccessBindings( request: SetAccessBindingsRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2827,6 +3156,7 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates access bindings for the specified container. */ updateAccessBindings( request: UpdateAccessBindingsRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function.ts b/src/generated/yandex/cloud/serverless/functions/v1/function.ts index 3c0bf2b6..e76b4fd1 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function.ts @@ -3,6 +3,11 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; import { Duration } from "../../../../../google/protobuf/duration"; +import { + LogLevel_Level, + logLevel_LevelFromJSON, + logLevel_LevelToJSON, +} from "../../../../../yandex/cloud/logging/v1/log_entry"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.serverless.functions.v1"; @@ -137,8 +142,14 @@ export interface Version { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; - /** Lockbox secrets to be used by the version */ + /** Yandex Lockbox secrets to be used by the version. 
*/ secrets: Secret[]; + /** Options for logging from the function */ + logOptions?: LogOptions; + /** S3 mounts to be used by the version. */ + storageMounts: StorageMount[]; + /** Config for asynchronous invocations of the version */ + asyncInvocationConfig?: AsyncInvocationConfig; } export enum Version_Status { @@ -196,7 +207,7 @@ export interface Version_NamedServiceAccountsEntry { /** Resources allocated to a version. */ export interface Resources { $type: "yandex.cloud.serverless.functions.v1.Resources"; - /** Amount of memory available to the version, specified in bytes. */ + /** Amount of memory available to the version, specified in bytes, multiple of 128MB. */ memory: number; } @@ -253,19 +264,80 @@ export interface ScalingPolicy { zoneRequestsLimit: number; } -/** Secret for serverless function */ +/** Secret for serverless function. */ export interface Secret { $type: "yandex.cloud.serverless.functions.v1.Secret"; - /** ID of lockbox secret */ + /** ID of Yandex Lockbox secret. */ id: string; - /** ID of secret version */ + /** ID of Yandex Lockbox version. */ versionId: string; - /** Key in secret's payload, which value to be delivered into function environment */ + /** Key in secret's payload, which value to be delivered into function environment. */ key: string; - /** environment variable in which secret's value to be delivered */ + /** environment variable in which secret's value to be delivered. */ environmentVariable: string | undefined; } +export interface LogOptions { + $type: "yandex.cloud.serverless.functions.v1.LogOptions"; + /** Is logging from function disabled. */ + disabled: boolean; + /** Entry should be written to log group resolved by ID. */ + logGroupId: string | undefined; + /** Entry should be written to default log group for specified folder. */ + folderId: string | undefined; + /** + * Minimum log entry level. + * + * See [LogLevel.Level] for details. 
+ */ + minLevel: LogLevel_Level; +} + +export interface StorageMount { + $type: "yandex.cloud.serverless.functions.v1.StorageMount"; + /** S3 bucket name for mounting. */ + bucketId: string; + /** S3 bucket prefix for mounting. */ + prefix: string; + /** Mount point directory name (not path) for mounting. */ + mountPointName: string; + /** Is mount read only. */ + readOnly: boolean; +} + +export interface AsyncInvocationConfig { + $type: "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig"; + /** Number of retries of version invocation */ + retriesCount: number; + /** Target for successful result of the version's invocation */ + successTarget?: AsyncInvocationConfig_ResponseTarget; + /** Target for unsuccessful result, if all retries failed */ + failureTarget?: AsyncInvocationConfig_ResponseTarget; + /** Service account which can invoke version */ + serviceAccountId: string; +} + +/** Target to which a result of an invocation will be sent */ +export interface AsyncInvocationConfig_ResponseTarget { + $type: "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig.ResponseTarget"; + /** Target to ignore a result */ + emptyTarget?: EmptyTarget | undefined; + /** Target to send a result to ymq */ + ymqTarget?: YMQTarget | undefined; +} + +export interface YMQTarget { + $type: "yandex.cloud.serverless.functions.v1.YMQTarget"; + /** Queue ARN */ + queueArn: string; + /** Service account which has write permission on the queue. 
*/ + serviceAccountId: string; +} + +export interface EmptyTarget { + $type: "yandex.cloud.serverless.functions.v1.EmptyTarget"; +} + const baseFunction: object = { $type: "yandex.cloud.serverless.functions.v1.Function", id: "", @@ -633,6 +705,18 @@ export const Version = { for (const v of message.secrets) { Secret.encode(v!, writer.uint32(154).fork()).ldelim(); } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(162).fork()).ldelim(); + } + for (const v of message.storageMounts) { + StorageMount.encode(v!, writer.uint32(170).fork()).ldelim(); + } + if (message.asyncInvocationConfig !== undefined) { + AsyncInvocationConfig.encode( + message.asyncInvocationConfig, + writer.uint32(178).fork() + ).ldelim(); + } return writer; }, @@ -644,6 +728,7 @@ export const Version = { message.environment = {}; message.namedServiceAccounts = {}; message.secrets = []; + message.storageMounts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -712,6 +797,20 @@ export const Version = { case 19: message.secrets.push(Secret.decode(reader, reader.uint32())); break; + case 20: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 21: + message.storageMounts.push( + StorageMount.decode(reader, reader.uint32()) + ); + break; + case 22: + message.asyncInvocationConfig = AsyncInvocationConfig.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -788,6 +887,18 @@ export const Version = { message.secrets = (object.secrets ?? []).map((e: any) => Secret.fromJSON(e) ); + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.storageMounts = (object.storageMounts ?? []).map((e: any) => + StorageMount.fromJSON(e) + ); + message.asyncInvocationConfig = + object.asyncInvocationConfig !== undefined && + object.asyncInvocationConfig !== null + ? 
AsyncInvocationConfig.fromJSON(object.asyncInvocationConfig) + : undefined; return message; }, @@ -844,6 +955,21 @@ export const Version = { } else { obj.secrets = []; } + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + if (message.storageMounts) { + obj.storageMounts = message.storageMounts.map((e) => + e ? StorageMount.toJSON(e) : undefined + ); + } else { + obj.storageMounts = []; + } + message.asyncInvocationConfig !== undefined && + (obj.asyncInvocationConfig = message.asyncInvocationConfig + ? AsyncInvocationConfig.toJSON(message.asyncInvocationConfig) + : undefined); return obj; }, @@ -889,6 +1015,17 @@ export const Version = { return acc; }, {}); message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.storageMounts = + object.storageMounts?.map((e) => StorageMount.fromPartial(e)) || []; + message.asyncInvocationConfig = + object.asyncInvocationConfig !== undefined && + object.asyncInvocationConfig !== null + ? 
AsyncInvocationConfig.fromPartial(object.asyncInvocationConfig) + : undefined; return message; }, }; @@ -1547,6 +1684,558 @@ export const Secret = { messageTypeRegistry.set(Secret.$type, Secret); +const baseLogOptions: object = { + $type: "yandex.cloud.serverless.functions.v1.LogOptions", + disabled: false, + minLevel: 0, +}; + +export const LogOptions = { + $type: "yandex.cloud.serverless.functions.v1.LogOptions" as const, + + encode( + message: LogOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.disabled === true) { + writer.uint32(8).bool(message.disabled); + } + if (message.logGroupId !== undefined) { + writer.uint32(18).string(message.logGroupId); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.minLevel !== 0) { + writer.uint32(32).int32(message.minLevel); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLogOptions } as LogOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.disabled = reader.bool(); + break; + case 2: + message.logGroupId = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.minLevel = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + message.logGroupId = + object.logGroupId !== undefined && object.logGroupId !== null + ? String(object.logGroupId) + : undefined; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? 
String(object.folderId) + : undefined; + message.minLevel = + object.minLevel !== undefined && object.minLevel !== null + ? logLevel_LevelFromJSON(object.minLevel) + : 0; + return message; + }, + + toJSON(message: LogOptions): unknown { + const obj: any = {}; + message.disabled !== undefined && (obj.disabled = message.disabled); + message.logGroupId !== undefined && (obj.logGroupId = message.logGroupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.minLevel !== undefined && + (obj.minLevel = logLevel_LevelToJSON(message.minLevel)); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogOptions { + const message = { ...baseLogOptions } as LogOptions; + message.disabled = object.disabled ?? false; + message.logGroupId = object.logGroupId ?? undefined; + message.folderId = object.folderId ?? undefined; + message.minLevel = object.minLevel ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(LogOptions.$type, LogOptions); + +const baseStorageMount: object = { + $type: "yandex.cloud.serverless.functions.v1.StorageMount", + bucketId: "", + prefix: "", + mountPointName: "", + readOnly: false, +}; + +export const StorageMount = { + $type: "yandex.cloud.serverless.functions.v1.StorageMount" as const, + + encode( + message: StorageMount, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketId !== "") { + writer.uint32(10).string(message.bucketId); + } + if (message.prefix !== "") { + writer.uint32(18).string(message.prefix); + } + if (message.mountPointName !== "") { + writer.uint32(26).string(message.mountPointName); + } + if (message.readOnly === true) { + writer.uint32(32).bool(message.readOnly); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StorageMount { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseStorageMount } as StorageMount; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketId = reader.string(); + break; + case 2: + message.prefix = reader.string(); + break; + case 3: + message.mountPointName = reader.string(); + break; + case 4: + message.readOnly = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StorageMount { + const message = { ...baseStorageMount } as StorageMount; + message.bucketId = + object.bucketId !== undefined && object.bucketId !== null + ? String(object.bucketId) + : ""; + message.prefix = + object.prefix !== undefined && object.prefix !== null + ? String(object.prefix) + : ""; + message.mountPointName = + object.mountPointName !== undefined && object.mountPointName !== null + ? String(object.mountPointName) + : ""; + message.readOnly = + object.readOnly !== undefined && object.readOnly !== null + ? Boolean(object.readOnly) + : false; + return message; + }, + + toJSON(message: StorageMount): unknown { + const obj: any = {}; + message.bucketId !== undefined && (obj.bucketId = message.bucketId); + message.prefix !== undefined && (obj.prefix = message.prefix); + message.mountPointName !== undefined && + (obj.mountPointName = message.mountPointName); + message.readOnly !== undefined && (obj.readOnly = message.readOnly); + return obj; + }, + + fromPartial, I>>( + object: I + ): StorageMount { + const message = { ...baseStorageMount } as StorageMount; + message.bucketId = object.bucketId ?? ""; + message.prefix = object.prefix ?? ""; + message.mountPointName = object.mountPointName ?? ""; + message.readOnly = object.readOnly ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(StorageMount.$type, StorageMount); + +const baseAsyncInvocationConfig: object = { + $type: "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig", + retriesCount: 0, + serviceAccountId: "", +}; + +export const AsyncInvocationConfig = { + $type: "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig" as const, + + encode( + message: AsyncInvocationConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.retriesCount !== 0) { + writer.uint32(8).int64(message.retriesCount); + } + if (message.successTarget !== undefined) { + AsyncInvocationConfig_ResponseTarget.encode( + message.successTarget, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.failureTarget !== undefined) { + AsyncInvocationConfig_ResponseTarget.encode( + message.failureTarget, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.serviceAccountId !== "") { + writer.uint32(34).string(message.serviceAccountId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsyncInvocationConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAsyncInvocationConfig } as AsyncInvocationConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.retriesCount = longToNumber(reader.int64() as Long); + break; + case 2: + message.successTarget = AsyncInvocationConfig_ResponseTarget.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.failureTarget = AsyncInvocationConfig_ResponseTarget.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.serviceAccountId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsyncInvocationConfig { + const message = { ...baseAsyncInvocationConfig } as AsyncInvocationConfig; + message.retriesCount = + object.retriesCount !== undefined && object.retriesCount !== null + ? Number(object.retriesCount) + : 0; + message.successTarget = + object.successTarget !== undefined && object.successTarget !== null + ? AsyncInvocationConfig_ResponseTarget.fromJSON(object.successTarget) + : undefined; + message.failureTarget = + object.failureTarget !== undefined && object.failureTarget !== null + ? AsyncInvocationConfig_ResponseTarget.fromJSON(object.failureTarget) + : undefined; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + return message; + }, + + toJSON(message: AsyncInvocationConfig): unknown { + const obj: any = {}; + message.retriesCount !== undefined && + (obj.retriesCount = Math.round(message.retriesCount)); + message.successTarget !== undefined && + (obj.successTarget = message.successTarget + ? AsyncInvocationConfig_ResponseTarget.toJSON(message.successTarget) + : undefined); + message.failureTarget !== undefined && + (obj.failureTarget = message.failureTarget + ? 
AsyncInvocationConfig_ResponseTarget.toJSON(message.failureTarget) + : undefined); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AsyncInvocationConfig { + const message = { ...baseAsyncInvocationConfig } as AsyncInvocationConfig; + message.retriesCount = object.retriesCount ?? 0; + message.successTarget = + object.successTarget !== undefined && object.successTarget !== null + ? AsyncInvocationConfig_ResponseTarget.fromPartial(object.successTarget) + : undefined; + message.failureTarget = + object.failureTarget !== undefined && object.failureTarget !== null + ? AsyncInvocationConfig_ResponseTarget.fromPartial(object.failureTarget) + : undefined; + message.serviceAccountId = object.serviceAccountId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AsyncInvocationConfig.$type, AsyncInvocationConfig); + +const baseAsyncInvocationConfig_ResponseTarget: object = { + $type: + "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig.ResponseTarget", +}; + +export const AsyncInvocationConfig_ResponseTarget = { + $type: + "yandex.cloud.serverless.functions.v1.AsyncInvocationConfig.ResponseTarget" as const, + + encode( + message: AsyncInvocationConfig_ResponseTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.emptyTarget !== undefined) { + EmptyTarget.encode( + message.emptyTarget, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.ymqTarget !== undefined) { + YMQTarget.encode(message.ymqTarget, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AsyncInvocationConfig_ResponseTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAsyncInvocationConfig_ResponseTarget, + } as AsyncInvocationConfig_ResponseTarget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.emptyTarget = EmptyTarget.decode(reader, reader.uint32()); + break; + case 2: + message.ymqTarget = YMQTarget.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AsyncInvocationConfig_ResponseTarget { + const message = { + ...baseAsyncInvocationConfig_ResponseTarget, + } as AsyncInvocationConfig_ResponseTarget; + message.emptyTarget = + object.emptyTarget !== undefined && object.emptyTarget !== null + ? EmptyTarget.fromJSON(object.emptyTarget) + : undefined; + message.ymqTarget = + object.ymqTarget !== undefined && object.ymqTarget !== null + ? YMQTarget.fromJSON(object.ymqTarget) + : undefined; + return message; + }, + + toJSON(message: AsyncInvocationConfig_ResponseTarget): unknown { + const obj: any = {}; + message.emptyTarget !== undefined && + (obj.emptyTarget = message.emptyTarget + ? EmptyTarget.toJSON(message.emptyTarget) + : undefined); + message.ymqTarget !== undefined && + (obj.ymqTarget = message.ymqTarget + ? YMQTarget.toJSON(message.ymqTarget) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): AsyncInvocationConfig_ResponseTarget { + const message = { + ...baseAsyncInvocationConfig_ResponseTarget, + } as AsyncInvocationConfig_ResponseTarget; + message.emptyTarget = + object.emptyTarget !== undefined && object.emptyTarget !== null + ? EmptyTarget.fromPartial(object.emptyTarget) + : undefined; + message.ymqTarget = + object.ymqTarget !== undefined && object.ymqTarget !== null + ? 
YMQTarget.fromPartial(object.ymqTarget) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + AsyncInvocationConfig_ResponseTarget.$type, + AsyncInvocationConfig_ResponseTarget +); + +const baseYMQTarget: object = { + $type: "yandex.cloud.serverless.functions.v1.YMQTarget", + queueArn: "", + serviceAccountId: "", +}; + +export const YMQTarget = { + $type: "yandex.cloud.serverless.functions.v1.YMQTarget" as const, + + encode( + message: YMQTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.queueArn !== "") { + writer.uint32(10).string(message.queueArn); + } + if (message.serviceAccountId !== "") { + writer.uint32(18).string(message.serviceAccountId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): YMQTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseYMQTarget } as YMQTarget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.queueArn = reader.string(); + break; + case 2: + message.serviceAccountId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): YMQTarget { + const message = { ...baseYMQTarget } as YMQTarget; + message.queueArn = + object.queueArn !== undefined && object.queueArn !== null + ? String(object.queueArn) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? 
String(object.serviceAccountId) + : ""; + return message; + }, + + toJSON(message: YMQTarget): unknown { + const obj: any = {}; + message.queueArn !== undefined && (obj.queueArn = message.queueArn); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + return obj; + }, + + fromPartial, I>>( + object: I + ): YMQTarget { + const message = { ...baseYMQTarget } as YMQTarget; + message.queueArn = object.queueArn ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(YMQTarget.$type, YMQTarget); + +const baseEmptyTarget: object = { + $type: "yandex.cloud.serverless.functions.v1.EmptyTarget", +}; + +export const EmptyTarget = { + $type: "yandex.cloud.serverless.functions.v1.EmptyTarget" as const, + + encode(_: EmptyTarget, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EmptyTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEmptyTarget } as EmptyTarget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): EmptyTarget { + const message = { ...baseEmptyTarget } as EmptyTarget; + return message; + }, + + toJSON(_: EmptyTarget): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>(_: I): EmptyTarget { + const message = { ...baseEmptyTarget } as EmptyTarget; + return message; + }, +}; + +messageTypeRegistry.set(EmptyTarget.$type, EmptyTarget); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts index 078cb865..c8b4e5cf 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts @@ -18,10 +18,13 @@ import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Resources, Connectivity, + LogOptions, + AsyncInvocationConfig, Function, Version, Package, Secret, + StorageMount, ScalingPolicy, } from "../../../../../yandex/cloud/serverless/functions/v1/function"; import { Duration } from "../../../../../google/protobuf/duration"; @@ -203,6 +206,24 @@ export interface DeleteFunctionMetadata { functionId: string; } +export interface DeleteFunctionVersionRequest { + $type: "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionRequest"; + /** ID of the function's version to delete. */ + functionVersionId: string; + /** + * Forces deletion of the version tags. + * + * If the value equals false and the function has tags with the selected version then request returns an error. 
+ */ + force: boolean; +} + +export interface DeleteFunctionVersionMetadata { + $type: "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionMetadata"; + /** ID of the function's version is being deleted. */ + functionVersionId: string; +} + export interface ListRuntimesRequest { $type: "yandex.cloud.serverless.functions.v1.ListRuntimesRequest"; } @@ -348,8 +369,14 @@ export interface CreateFunctionVersionRequest { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; - /** Lockbox secrets to be used by the version */ + /** Yandex Lockbox secrets to be used by the version. */ secrets: Secret[]; + /** Options for logging from the function */ + logOptions?: LogOptions; + /** S3 mounts to be used by the version. */ + storageMounts: StorageMount[]; + /** Config for asynchronous invocations of the version */ + asyncInvocationConfig?: AsyncInvocationConfig; } export interface CreateFunctionVersionRequest_EnvironmentEntry { @@ -1698,6 +1725,173 @@ export const DeleteFunctionMetadata = { messageTypeRegistry.set(DeleteFunctionMetadata.$type, DeleteFunctionMetadata); +const baseDeleteFunctionVersionRequest: object = { + $type: "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionRequest", + functionVersionId: "", + force: false, +}; + +export const DeleteFunctionVersionRequest = { + $type: + "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionRequest" as const, + + encode( + message: DeleteFunctionVersionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.functionVersionId !== "") { + writer.uint32(18).string(message.functionVersionId); + } + if (message.force === true) { + writer.uint32(24).bool(message.force); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteFunctionVersionRequest { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteFunctionVersionRequest, + } as DeleteFunctionVersionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.functionVersionId = reader.string(); + break; + case 3: + message.force = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteFunctionVersionRequest { + const message = { + ...baseDeleteFunctionVersionRequest, + } as DeleteFunctionVersionRequest; + message.functionVersionId = + object.functionVersionId !== undefined && + object.functionVersionId !== null + ? String(object.functionVersionId) + : ""; + message.force = + object.force !== undefined && object.force !== null + ? Boolean(object.force) + : false; + return message; + }, + + toJSON(message: DeleteFunctionVersionRequest): unknown { + const obj: any = {}; + message.functionVersionId !== undefined && + (obj.functionVersionId = message.functionVersionId); + message.force !== undefined && (obj.force = message.force); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteFunctionVersionRequest { + const message = { + ...baseDeleteFunctionVersionRequest, + } as DeleteFunctionVersionRequest; + message.functionVersionId = object.functionVersionId ?? ""; + message.force = object.force ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteFunctionVersionRequest.$type, + DeleteFunctionVersionRequest +); + +const baseDeleteFunctionVersionMetadata: object = { + $type: "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionMetadata", + functionVersionId: "", +}; + +export const DeleteFunctionVersionMetadata = { + $type: + "yandex.cloud.serverless.functions.v1.DeleteFunctionVersionMetadata" as const, + + encode( + message: DeleteFunctionVersionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.functionVersionId !== "") { + writer.uint32(18).string(message.functionVersionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteFunctionVersionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteFunctionVersionMetadata, + } as DeleteFunctionVersionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.functionVersionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteFunctionVersionMetadata { + const message = { + ...baseDeleteFunctionVersionMetadata, + } as DeleteFunctionVersionMetadata; + message.functionVersionId = + object.functionVersionId !== undefined && + object.functionVersionId !== null + ? 
String(object.functionVersionId) + : ""; + return message; + }, + + toJSON(message: DeleteFunctionVersionMetadata): unknown { + const obj: any = {}; + message.functionVersionId !== undefined && + (obj.functionVersionId = message.functionVersionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteFunctionVersionMetadata { + const message = { + ...baseDeleteFunctionVersionMetadata, + } as DeleteFunctionVersionMetadata; + message.functionVersionId = object.functionVersionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteFunctionVersionMetadata.$type, + DeleteFunctionVersionMetadata +); + const baseListRuntimesRequest: object = { $type: "yandex.cloud.serverless.functions.v1.ListRuntimesRequest", }; @@ -2330,6 +2524,18 @@ export const CreateFunctionVersionRequest = { for (const v of message.secrets) { Secret.encode(v!, writer.uint32(146).fork()).ldelim(); } + if (message.logOptions !== undefined) { + LogOptions.encode(message.logOptions, writer.uint32(154).fork()).ldelim(); + } + for (const v of message.storageMounts) { + StorageMount.encode(v!, writer.uint32(162).fork()).ldelim(); + } + if (message.asyncInvocationConfig !== undefined) { + AsyncInvocationConfig.encode( + message.asyncInvocationConfig, + writer.uint32(178).fork() + ).ldelim(); + } return writer; }, @@ -2346,6 +2552,7 @@ export const CreateFunctionVersionRequest = { message.tag = []; message.namedServiceAccounts = {}; message.secrets = []; + message.storageMounts = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2407,6 +2614,20 @@ export const CreateFunctionVersionRequest = { case 18: message.secrets.push(Secret.decode(reader, reader.uint32())); break; + case 19: + message.logOptions = LogOptions.decode(reader, reader.uint32()); + break; + case 20: + message.storageMounts.push( + StorageMount.decode(reader, reader.uint32()) + ); + break; + case 22: + message.asyncInvocationConfig = AsyncInvocationConfig.decode( + reader, + 
reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -2479,6 +2700,18 @@ export const CreateFunctionVersionRequest = { message.secrets = (object.secrets ?? []).map((e: any) => Secret.fromJSON(e) ); + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromJSON(object.logOptions) + : undefined; + message.storageMounts = (object.storageMounts ?? []).map((e: any) => + StorageMount.fromJSON(e) + ); + message.asyncInvocationConfig = + object.asyncInvocationConfig !== undefined && + object.asyncInvocationConfig !== null + ? AsyncInvocationConfig.fromJSON(object.asyncInvocationConfig) + : undefined; return message; }, @@ -2537,6 +2770,21 @@ export const CreateFunctionVersionRequest = { } else { obj.secrets = []; } + message.logOptions !== undefined && + (obj.logOptions = message.logOptions + ? LogOptions.toJSON(message.logOptions) + : undefined); + if (message.storageMounts) { + obj.storageMounts = message.storageMounts.map((e) => + e ? StorageMount.toJSON(e) : undefined + ); + } else { + obj.storageMounts = []; + } + message.asyncInvocationConfig !== undefined && + (obj.asyncInvocationConfig = message.asyncInvocationConfig + ? AsyncInvocationConfig.toJSON(message.asyncInvocationConfig) + : undefined); return obj; }, @@ -2587,6 +2835,17 @@ export const CreateFunctionVersionRequest = { return acc; }, {}); message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.logOptions = + object.logOptions !== undefined && object.logOptions !== null + ? LogOptions.fromPartial(object.logOptions) + : undefined; + message.storageMounts = + object.storageMounts?.map((e) => StorageMount.fromPartial(e)) || []; + message.asyncInvocationConfig = + object.asyncInvocationConfig !== undefined && + object.asyncInvocationConfig !== null + ? 
AsyncInvocationConfig.fromPartial(object.asyncInvocationConfig) + : undefined; return message; }, }; @@ -4231,6 +4490,23 @@ export const FunctionServiceService = { responseDeserialize: (value: Buffer) => ListFunctionsVersionsResponse.decode(value), }, + /** + * Deletes the specified version of a function. + * + * NOTE: old untagged function versions are deleted automatically. + */ + deleteVersion: { + path: "/yandex.cloud.serverless.functions.v1.FunctionService/DeleteVersion", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteFunctionVersionRequest) => + Buffer.from(DeleteFunctionVersionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteFunctionVersionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Set a tag for the specified version of a function. */ setTag: { path: "/yandex.cloud.serverless.functions.v1.FunctionService/SetTag", @@ -4426,6 +4702,12 @@ export interface FunctionServiceServer extends UntypedServiceImplementation { ListFunctionsVersionsRequest, ListFunctionsVersionsResponse >; + /** + * Deletes the specified version of a function. + * + * NOTE: old untagged function versions are deleted automatically. + */ + deleteVersion: handleUnaryCall; /** Set a tag for the specified version of a function. */ setTag: handleUnaryCall; /** Remove a tag from the specified version of a function. */ @@ -4626,6 +4908,26 @@ export interface FunctionServiceClient extends Client { response: ListFunctionsVersionsResponse ) => void ): ClientUnaryCall; + /** + * Deletes the specified version of a function. + * + * NOTE: old untagged function versions are deleted automatically. 
+ */ + deleteVersion( + request: DeleteFunctionVersionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteVersion( + request: DeleteFunctionVersionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteVersion( + request: DeleteFunctionVersionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Set a tag for the specified version of a function. */ setTag( request: SetFunctionTagRequest, diff --git a/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts b/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts index c6ae3981..f198c333 100644 --- a/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts +++ b/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts @@ -307,6 +307,8 @@ export interface Trigger_Timer { $type: "yandex.cloud.serverless.triggers.v1.Trigger.Timer"; /** Description of a schedule as a [cron expression](/docs/functions/concepts/trigger/timer). */ cronExpression: string; + /** Payload to be passed to function. */ + payload: string; /** Instructions for invoking a function once. */ invokeFunction?: InvokeFunctionOnce | undefined; /** Instructions for invoking a function with retry. */ @@ -341,6 +343,8 @@ export interface Trigger_IoTMessage { deviceId: string; /** MQTT topic whose messages activate the trigger. */ mqttTopic: string; + /** Batch settings for processing events. */ + batchSettings?: BatchSettings; /** Instructions for invoking a function with retries as needed. */ invokeFunction?: InvokeFunctionWithRetry | undefined; /** Instructions for invoking a container with retries as needed. */ @@ -354,6 +358,8 @@ export interface Trigger_IoTBrokerMessage { brokerId: string; /** MQTT topic whose messages activate the trigger. */ mqttTopic: string; + /** Batch settings for processing events. 
*/ + batchSettings?: BatchSettings; /** Instructions for invoking a function with retries as needed. */ invokeFunction?: InvokeFunctionWithRetry | undefined; /** Instructions for invoking a container with retries as needed. */ @@ -370,6 +376,8 @@ export interface Trigger_ObjectStorage { prefix: string; /** Suffix of the object key. Filter, optional. */ suffix: string; + /** Batch settings for processing events. */ + batchSettings?: BatchSettings; /** Instructions for invoking a function with retries as needed. */ invokeFunction?: InvokeFunctionWithRetry | undefined; /** Instructions for invoking a container with retries as needed. */ @@ -386,6 +394,8 @@ export interface Trigger_ContainerRegistry { imageName: string; /** Docker-image tag. Filter, optional. */ tag: string; + /** Batch settings for processing events. */ + batchSettings?: BatchSettings; /** Instructions for invoking a function with retries as needed. */ invokeFunction?: InvokeFunctionWithRetry | undefined; /** Instructions for invoking a container with retries as needed. */ @@ -410,6 +420,7 @@ export interface Trigger_Logging { logGroupId: string; resourceType: string[]; resourceId: string[]; + streamName: string[]; levels: LogLevel_Level[]; /** Batch settings for processing log events. */ batchSettings?: LoggingBatchSettings; @@ -569,6 +580,14 @@ export interface DataStream { invokeContainer?: InvokeContainerWithRetry | undefined; } +export interface ObjectStorageBucketSettings { + $type: "yandex.cloud.serverless.triggers.v1.ObjectStorageBucketSettings"; + /** Bucket for saving. */ + bucketId: string; + /** SA which has write permission on storage. */ + serviceAccountId: string; +} + export interface Mail { $type: "yandex.cloud.serverless.triggers.v1.Mail"; /** @@ -576,6 +595,10 @@ export interface Mail { * Field is ignored for write requests and populated on trigger creation. */ email: string; + /** Batch settings for processing events. 
*/ + batchSettings?: BatchSettings; + /** Bucket settings for saving attachments. */ + attachmentsBucket?: ObjectStorageBucketSettings; invokeFunction?: InvokeFunctionWithRetry | undefined; invokeContainer?: InvokeContainerWithRetry | undefined; } @@ -1125,6 +1148,7 @@ messageTypeRegistry.set(Trigger_Rule.$type, Trigger_Rule); const baseTrigger_Timer: object = { $type: "yandex.cloud.serverless.triggers.v1.Trigger.Timer", cronExpression: "", + payload: "", }; export const Trigger_Timer = { @@ -1137,6 +1161,9 @@ export const Trigger_Timer = { if (message.cronExpression !== "") { writer.uint32(10).string(message.cronExpression); } + if (message.payload !== "") { + writer.uint32(18).string(message.payload); + } if (message.invokeFunction !== undefined) { InvokeFunctionOnce.encode( message.invokeFunction, @@ -1168,6 +1195,9 @@ export const Trigger_Timer = { case 1: message.cronExpression = reader.string(); break; + case 2: + message.payload = reader.string(); + break; case 101: message.invokeFunction = InvokeFunctionOnce.decode( reader, @@ -1200,6 +1230,10 @@ export const Trigger_Timer = { object.cronExpression !== undefined && object.cronExpression !== null ? String(object.cronExpression) : ""; + message.payload = + object.payload !== undefined && object.payload !== null + ? String(object.payload) + : ""; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionOnce.fromJSON(object.invokeFunction) @@ -1221,6 +1255,7 @@ export const Trigger_Timer = { const obj: any = {}; message.cronExpression !== undefined && (obj.cronExpression = message.cronExpression); + message.payload !== undefined && (obj.payload = message.payload); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? 
InvokeFunctionOnce.toJSON(message.invokeFunction) @@ -1241,6 +1276,7 @@ export const Trigger_Timer = { ): Trigger_Timer { const message = { ...baseTrigger_Timer } as Trigger_Timer; message.cronExpression = object.cronExpression ?? ""; + message.payload = object.payload ?? ""; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionOnce.fromPartial(object.invokeFunction) @@ -1455,6 +1491,12 @@ export const Trigger_IoTMessage = { if (message.mqttTopic !== "") { writer.uint32(26).string(message.mqttTopic); } + if (message.batchSettings !== undefined) { + BatchSettings.encode( + message.batchSettings, + writer.uint32(34).fork() + ).ldelim(); + } if (message.invokeFunction !== undefined) { InvokeFunctionWithRetry.encode( message.invokeFunction, @@ -1486,6 +1528,9 @@ export const Trigger_IoTMessage = { case 3: message.mqttTopic = reader.string(); break; + case 4: + message.batchSettings = BatchSettings.decode(reader, reader.uint32()); + break; case 101: message.invokeFunction = InvokeFunctionWithRetry.decode( reader, @@ -1520,6 +1565,10 @@ export const Trigger_IoTMessage = { object.mqttTopic !== undefined && object.mqttTopic !== null ? String(object.mqttTopic) : ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromJSON(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) @@ -1536,6 +1585,10 @@ export const Trigger_IoTMessage = { message.registryId !== undefined && (obj.registryId = message.registryId); message.deviceId !== undefined && (obj.deviceId = message.deviceId); message.mqttTopic !== undefined && (obj.mqttTopic = message.mqttTopic); + message.batchSettings !== undefined && + (obj.batchSettings = message.batchSettings + ? 
BatchSettings.toJSON(message.batchSettings) + : undefined); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) @@ -1554,6 +1607,10 @@ export const Trigger_IoTMessage = { message.registryId = object.registryId ?? ""; message.deviceId = object.deviceId ?? ""; message.mqttTopic = object.mqttTopic ?? ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromPartial(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromPartial(object.invokeFunction) @@ -1588,6 +1645,12 @@ export const Trigger_IoTBrokerMessage = { if (message.mqttTopic !== "") { writer.uint32(18).string(message.mqttTopic); } + if (message.batchSettings !== undefined) { + BatchSettings.encode( + message.batchSettings, + writer.uint32(26).fork() + ).ldelim(); + } if (message.invokeFunction !== undefined) { InvokeFunctionWithRetry.encode( message.invokeFunction, @@ -1621,6 +1684,9 @@ export const Trigger_IoTBrokerMessage = { case 2: message.mqttTopic = reader.string(); break; + case 3: + message.batchSettings = BatchSettings.decode(reader, reader.uint32()); + break; case 101: message.invokeFunction = InvokeFunctionWithRetry.decode( reader, @@ -1653,6 +1719,10 @@ export const Trigger_IoTBrokerMessage = { object.mqttTopic !== undefined && object.mqttTopic !== null ? String(object.mqttTopic) : ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromJSON(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? 
InvokeFunctionWithRetry.fromJSON(object.invokeFunction) @@ -1668,6 +1738,10 @@ export const Trigger_IoTBrokerMessage = { const obj: any = {}; message.brokerId !== undefined && (obj.brokerId = message.brokerId); message.mqttTopic !== undefined && (obj.mqttTopic = message.mqttTopic); + message.batchSettings !== undefined && + (obj.batchSettings = message.batchSettings + ? BatchSettings.toJSON(message.batchSettings) + : undefined); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) @@ -1687,6 +1761,10 @@ export const Trigger_IoTBrokerMessage = { } as Trigger_IoTBrokerMessage; message.brokerId = object.brokerId ?? ""; message.mqttTopic = object.mqttTopic ?? ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromPartial(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromPartial(object.invokeFunction) @@ -1733,6 +1811,12 @@ export const Trigger_ObjectStorage = { if (message.suffix !== "") { writer.uint32(58).string(message.suffix); } + if (message.batchSettings !== undefined) { + BatchSettings.encode( + message.batchSettings, + writer.uint32(66).fork() + ).ldelim(); + } if (message.invokeFunction !== undefined) { InvokeFunctionWithRetry.encode( message.invokeFunction, @@ -1778,6 +1862,9 @@ export const Trigger_ObjectStorage = { case 7: message.suffix = reader.string(); break; + case 8: + message.batchSettings = BatchSettings.decode(reader, reader.uint32()); + break; case 101: message.invokeFunction = InvokeFunctionWithRetry.decode( reader, @@ -1815,6 +1902,10 @@ export const Trigger_ObjectStorage = { object.suffix !== undefined && object.suffix !== null ? String(object.suffix) : ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? 
BatchSettings.fromJSON(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) @@ -1838,6 +1929,10 @@ export const Trigger_ObjectStorage = { message.bucketId !== undefined && (obj.bucketId = message.bucketId); message.prefix !== undefined && (obj.prefix = message.prefix); message.suffix !== undefined && (obj.suffix = message.suffix); + message.batchSettings !== undefined && + (obj.batchSettings = message.batchSettings + ? BatchSettings.toJSON(message.batchSettings) + : undefined); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) @@ -1857,6 +1952,10 @@ export const Trigger_ObjectStorage = { message.bucketId = object.bucketId ?? ""; message.prefix = object.prefix ?? ""; message.suffix = object.suffix ?? ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromPartial(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? 
InvokeFunctionWithRetry.fromPartial(object.invokeFunction) @@ -1901,6 +2000,12 @@ export const Trigger_ContainerRegistry = { if (message.tag !== "") { writer.uint32(50).string(message.tag); } + if (message.batchSettings !== undefined) { + BatchSettings.encode( + message.batchSettings, + writer.uint32(58).fork() + ).ldelim(); + } if (message.invokeFunction !== undefined) { InvokeFunctionWithRetry.encode( message.invokeFunction, @@ -1948,6 +2053,9 @@ export const Trigger_ContainerRegistry = { case 6: message.tag = reader.string(); break; + case 7: + message.batchSettings = BatchSettings.decode(reader, reader.uint32()); + break; case 101: message.invokeFunction = InvokeFunctionWithRetry.decode( reader, @@ -1985,6 +2093,10 @@ export const Trigger_ContainerRegistry = { : ""; message.tag = object.tag !== undefined && object.tag !== null ? String(object.tag) : ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromJSON(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) @@ -2008,6 +2120,10 @@ export const Trigger_ContainerRegistry = { message.registryId !== undefined && (obj.registryId = message.registryId); message.imageName !== undefined && (obj.imageName = message.imageName); message.tag !== undefined && (obj.tag = message.tag); + message.batchSettings !== undefined && + (obj.batchSettings = message.batchSettings + ? BatchSettings.toJSON(message.batchSettings) + : undefined); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) @@ -2029,6 +2145,10 @@ export const Trigger_ContainerRegistry = { message.registryId = object.registryId ?? ""; message.imageName = object.imageName ?? ""; message.tag = object.tag ?? 
""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromPartial(object.batchSettings) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromPartial(object.invokeFunction) @@ -2187,6 +2307,7 @@ const baseTrigger_Logging: object = { logGroupId: "", resourceType: "", resourceId: "", + streamName: "", levels: 0, }; @@ -2206,6 +2327,9 @@ export const Trigger_Logging = { for (const v of message.resourceId) { writer.uint32(34).string(v!); } + for (const v of message.streamName) { + writer.uint32(58).string(v!); + } writer.uint32(42).fork(); for (const v of message.levels) { writer.int32(v); @@ -2238,6 +2362,7 @@ export const Trigger_Logging = { const message = { ...baseTrigger_Logging } as Trigger_Logging; message.resourceType = []; message.resourceId = []; + message.streamName = []; message.levels = []; while (reader.pos < end) { const tag = reader.uint32(); @@ -2251,6 +2376,9 @@ export const Trigger_Logging = { case 4: message.resourceId.push(reader.string()); break; + case 7: + message.streamName.push(reader.string()); + break; case 5: if ((tag & 7) === 2) { const end2 = reader.uint32() + reader.pos; @@ -2297,6 +2425,7 @@ export const Trigger_Logging = { String(e) ); message.resourceId = (object.resourceId ?? []).map((e: any) => String(e)); + message.streamName = (object.streamName ?? []).map((e: any) => String(e)); message.levels = (object.levels ?? []).map((e: any) => logLevel_LevelFromJSON(e) ); @@ -2328,6 +2457,11 @@ export const Trigger_Logging = { } else { obj.resourceId = []; } + if (message.streamName) { + obj.streamName = message.streamName.map((e) => e); + } else { + obj.streamName = []; + } if (message.levels) { obj.levels = message.levels.map((e) => logLevel_LevelToJSON(e)); } else { @@ -2355,6 +2489,7 @@ export const Trigger_Logging = { message.logGroupId = object.logGroupId ?? 
""; message.resourceType = object.resourceType?.map((e) => e) || []; message.resourceId = object.resourceId?.map((e) => e) || []; + message.streamName = object.streamName?.map((e) => e) || []; message.levels = object.levels?.map((e) => e) || []; message.batchSettings = object.batchSettings !== undefined && object.batchSettings !== null @@ -3636,6 +3771,95 @@ export const DataStream = { messageTypeRegistry.set(DataStream.$type, DataStream); +const baseObjectStorageBucketSettings: object = { + $type: "yandex.cloud.serverless.triggers.v1.ObjectStorageBucketSettings", + bucketId: "", + serviceAccountId: "", +}; + +export const ObjectStorageBucketSettings = { + $type: + "yandex.cloud.serverless.triggers.v1.ObjectStorageBucketSettings" as const, + + encode( + message: ObjectStorageBucketSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketId !== "") { + writer.uint32(10).string(message.bucketId); + } + if (message.serviceAccountId !== "") { + writer.uint32(18).string(message.serviceAccountId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ObjectStorageBucketSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseObjectStorageBucketSettings, + } as ObjectStorageBucketSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketId = reader.string(); + break; + case 2: + message.serviceAccountId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ObjectStorageBucketSettings { + const message = { + ...baseObjectStorageBucketSettings, + } as ObjectStorageBucketSettings; + message.bucketId = + object.bucketId !== undefined && object.bucketId !== null + ? 
String(object.bucketId) + : ""; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + return message; + }, + + toJSON(message: ObjectStorageBucketSettings): unknown { + const obj: any = {}; + message.bucketId !== undefined && (obj.bucketId = message.bucketId); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ObjectStorageBucketSettings { + const message = { + ...baseObjectStorageBucketSettings, + } as ObjectStorageBucketSettings; + message.bucketId = object.bucketId ?? ""; + message.serviceAccountId = object.serviceAccountId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ObjectStorageBucketSettings.$type, + ObjectStorageBucketSettings +); + const baseMail: object = { $type: "yandex.cloud.serverless.triggers.v1.Mail", email: "", @@ -3648,6 +3872,18 @@ export const Mail = { if (message.email !== "") { writer.uint32(18).string(message.email); } + if (message.batchSettings !== undefined) { + BatchSettings.encode( + message.batchSettings, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.attachmentsBucket !== undefined) { + ObjectStorageBucketSettings.encode( + message.attachmentsBucket, + writer.uint32(34).fork() + ).ldelim(); + } if (message.invokeFunction !== undefined) { InvokeFunctionWithRetry.encode( message.invokeFunction, @@ -3673,6 +3909,15 @@ export const Mail = { case 2: message.email = reader.string(); break; + case 3: + message.batchSettings = BatchSettings.decode(reader, reader.uint32()); + break; + case 4: + message.attachmentsBucket = ObjectStorageBucketSettings.decode( + reader, + reader.uint32() + ); + break; case 101: message.invokeFunction = InvokeFunctionWithRetry.decode( reader, @@ -3699,6 +3944,15 @@ export const Mail = { object.email !== undefined && object.email !== null ? 
String(object.email) : ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromJSON(object.batchSettings) + : undefined; + message.attachmentsBucket = + object.attachmentsBucket !== undefined && + object.attachmentsBucket !== null + ? ObjectStorageBucketSettings.fromJSON(object.attachmentsBucket) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) @@ -3713,6 +3967,14 @@ export const Mail = { toJSON(message: Mail): unknown { const obj: any = {}; message.email !== undefined && (obj.email = message.email); + message.batchSettings !== undefined && + (obj.batchSettings = message.batchSettings + ? BatchSettings.toJSON(message.batchSettings) + : undefined); + message.attachmentsBucket !== undefined && + (obj.attachmentsBucket = message.attachmentsBucket + ? ObjectStorageBucketSettings.toJSON(message.attachmentsBucket) + : undefined); message.invokeFunction !== undefined && (obj.invokeFunction = message.invokeFunction ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) @@ -3727,6 +3989,15 @@ export const Mail = { fromPartial, I>>(object: I): Mail { const message = { ...baseMail } as Mail; message.email = object.email ?? ""; + message.batchSettings = + object.batchSettings !== undefined && object.batchSettings !== null + ? BatchSettings.fromPartial(object.batchSettings) + : undefined; + message.attachmentsBucket = + object.attachmentsBucket !== undefined && + object.attachmentsBucket !== null + ? ObjectStorageBucketSettings.fromPartial(object.attachmentsBucket) + : undefined; message.invokeFunction = object.invokeFunction !== undefined && object.invokeFunction !== null ? 
InvokeFunctionWithRetry.fromPartial(object.invokeFunction) diff --git a/src/generated/yandex/cloud/serverless/triggers/v1/trigger_service.ts b/src/generated/yandex/cloud/serverless/triggers/v1/trigger_service.ts index dcd4cedd..810d80c6 100644 --- a/src/generated/yandex/cloud/serverless/triggers/v1/trigger_service.ts +++ b/src/generated/yandex/cloud/serverless/triggers/v1/trigger_service.ts @@ -137,6 +137,8 @@ export interface UpdateTriggerRequest { * to add or remove a label, request the current set of labels with a [TriggerService.Get] request. */ labels: { [key: string]: string }; + /** New parameters for trigger. */ + rule?: Trigger_Rule; } export interface UpdateTriggerRequest_LabelsEntry { @@ -828,6 +830,9 @@ export const UpdateTriggerRequest = { writer.uint32(42).fork() ).ldelim(); }); + if (message.rule !== undefined) { + Trigger_Rule.encode(message.rule, writer.uint32(50).fork()).ldelim(); + } return writer; }, @@ -863,6 +868,9 @@ export const UpdateTriggerRequest = { message.labels[entry5.key] = entry5.value; } break; + case 6: + message.rule = Trigger_Rule.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -895,6 +903,10 @@ export const UpdateTriggerRequest = { acc[key] = String(value); return acc; }, {}); + message.rule = + object.rule !== undefined && object.rule !== null + ? Trigger_Rule.fromJSON(object.rule) + : undefined; return message; }, @@ -914,6 +926,8 @@ export const UpdateTriggerRequest = { obj.labels[k] = v; }); } + message.rule !== undefined && + (obj.rule = message.rule ? Trigger_Rule.toJSON(message.rule) : undefined); return obj; }, @@ -936,6 +950,10 @@ export const UpdateTriggerRequest = { } return acc; }, {}); + message.rule = + object.rule !== undefined && object.rule !== null + ? 
Trigger_Rule.fromPartial(object.rule) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index c007e866..adb79e1e 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -1,4 +1,6 @@ import * as cloudApi from '.' +export const TextRecognitionServiceClient = cloudApi.ai.ocr_service.TextRecognitionServiceClient; +export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; export const TranslationServiceClient = cloudApi.ai.translate_translation_service.TranslationServiceClient; export const SynthesizerClient = cloudApi.ai.tts_service.SynthesizerClient; @@ -9,6 +11,10 @@ export const HttpRouterServiceClient = cloudApi.apploadbalancer.http_router_serv export const LoadBalancerServiceClient = cloudApi.apploadbalancer.load_balancer_service.LoadBalancerServiceClient; export const AlbTargetGroupServiceClient = cloudApi.apploadbalancer.target_group_service.TargetGroupServiceClient; export const VirtualHostServiceClient = cloudApi.apploadbalancer.virtual_host_service.VirtualHostServiceClient; +export const BackupServiceClient = cloudApi.backup.backup_service.BackupServiceClient; +export const PolicyServiceClient = cloudApi.backup.policy_service.PolicyServiceClient; +export const ProviderServiceClient = cloudApi.backup.provider_service.ProviderServiceClient; +export const ResourceServiceClient = cloudApi.backup.resource_service.ResourceServiceClient; export const BillingAccountServiceClient = cloudApi.billing.billing_account_service.BillingAccountServiceClient; export const BudgetServiceClient = cloudApi.billing.budget_service.BudgetServiceClient; export const CustomerServiceClient = cloudApi.billing.customer_service.CustomerServiceClient; @@ -26,6 +32,7 @@ export const DiskPlacementGroupServiceClient = cloudApi.compute.disk_placement_g 
export const DiskServiceClient = cloudApi.compute.disk_service.DiskServiceClient; export const DiskTypeServiceClient = cloudApi.compute.disk_type_service.DiskTypeServiceClient; export const FilesystemServiceClient = cloudApi.compute.filesystem_service.FilesystemServiceClient; +export const GpuClusterServiceClient = cloudApi.compute.gpu_cluster_service.GpuClusterServiceClient; export const HostGroupServiceClient = cloudApi.compute.host_group_service.HostGroupServiceClient; export const HostTypeServiceClient = cloudApi.compute.host_type_service.HostTypeServiceClient; export const ComputeImageServiceClient = cloudApi.compute.image_service.ImageServiceClient; @@ -39,6 +46,7 @@ export const CrImageServiceClient = cloudApi.containerregistry.image_service.Ima export const LifecyclePolicyServiceClient = cloudApi.containerregistry.lifecycle_policy_service.LifecyclePolicyServiceClient; export const RegistryServiceClient = cloudApi.containerregistry.registry_service.RegistryServiceClient; export const RepositoryServiceClient = cloudApi.containerregistry.repository_service.RepositoryServiceClient; +export const ScanPolicyServiceClient = cloudApi.containerregistry.scan_policy_service.ScanPolicyServiceClient; export const ScannerServiceClient = cloudApi.containerregistry.scanner_service.ScannerServiceClient; export const DataProcClusterServiceClient = cloudApi.dataproc.cluster_service.ClusterServiceClient; export const JobServiceClient = cloudApi.dataproc.job_service.JobServiceClient; @@ -51,6 +59,8 @@ export const FolderBudgetServiceClient = cloudApi.datasphere.folder_budget_servi export const NodeServiceClient = cloudApi.datasphere.node_service.NodeServiceClient; export const ProjectDataServiceClient = cloudApi.datasphere.project_data_service.ProjectDataServiceClient; export const ProjectServiceClient = cloudApi.datasphere.project_service.ProjectServiceClient; +export const CommunityServiceClient = cloudApi.datasphere.community_service.CommunityServiceClient; +export const 
ProjectServiceClient = cloudApi.datasphere.project_service.ProjectServiceClient; export const EndpointServiceClient = cloudApi.datatransfer.endpoint_service.EndpointServiceClient; export const TransferServiceClient = cloudApi.datatransfer.transfer_service.TransferServiceClient; export const DnsZoneServiceClient = cloudApi.dns.dns_zone_service.DnsZoneServiceClient; @@ -74,13 +84,29 @@ export const NodeGroupServiceClient = cloudApi.k8s.node_group_service.NodeGroupS export const VersionServiceClient = cloudApi.k8s.version_service.VersionServiceClient; export const SymmetricCryptoServiceClient = cloudApi.kms.symmetric_crypto_service.SymmetricCryptoServiceClient; export const SymmetricKeyServiceClient = cloudApi.kms.symmetric_key_service.SymmetricKeyServiceClient; +export const AsymmetricEncryptionCryptoServiceClient = cloudApi.kms.asymmetric_encryption_crypto_service.AsymmetricEncryptionCryptoServiceClient; +export const AsymmetricEncryptionKeyServiceClient = cloudApi.kms.asymmetric_encryption_key_service.AsymmetricEncryptionKeyServiceClient; +export const AsymmetricSignatureCryptoServiceClient = cloudApi.kms.asymmetric_signature_crypto_service.AsymmetricSignatureCryptoServiceClient; +export const AsymmetricSignatureKeyServiceClient = cloudApi.kms.asymmetric_signature_key_service.AsymmetricSignatureKeyServiceClient; export const NetworkLoadBalancerServiceClient = cloudApi.loadbalancer.network_load_balancer_service.NetworkLoadBalancerServiceClient; export const TargetGroupServiceClient = cloudApi.loadbalancer.target_group_service.TargetGroupServiceClient; +export const AgentRegistrationServiceClient = cloudApi.loadtesting.agent_agent_registration_service.AgentRegistrationServiceClient; +export const AgentServiceClient = cloudApi.loadtesting.agent_service.AgentServiceClient; +export const JobServiceClient = cloudApi.loadtesting.agent_job_service.JobServiceClient; +export const MonitoringServiceClient = 
cloudApi.loadtesting.agent_monitoring_service.MonitoringServiceClient; +export const TestServiceClient = cloudApi.loadtesting.agent_test_service.TestServiceClient; +export const TrailServiceClient = cloudApi.loadtesting.agent_trail_service.TrailServiceClient; +export const AgentServiceClient = cloudApi.loadtesting.api_agent_service.AgentServiceClient; export const PayloadServiceClient = cloudApi.lockbox.payload_service.PayloadServiceClient; export const SecretServiceClient = cloudApi.lockbox.secret_service.SecretServiceClient; +export const ExportServiceClient = cloudApi.logging.export_service.ExportServiceClient; export const LogGroupServiceClient = cloudApi.logging.log_group_service.LogGroupServiceClient; export const LogIngestionServiceClient = cloudApi.logging.log_ingestion_service.LogIngestionServiceClient; export const LogReadingServiceClient = cloudApi.logging.log_reading_service.LogReadingServiceClient; +export const SinkServiceClient = cloudApi.logging.sink_service.SinkServiceClient; +export const InstanceServiceClient = cloudApi.marketplace.licensemanager_instance_service.InstanceServiceClient; +export const LockServiceClient = cloudApi.marketplace.licensemanager_lock_service.LockServiceClient; +export const ImageProductUsageServiceClient = cloudApi.marketplace.metering_image_product_usage_service.ImageProductUsageServiceClient; export const ImageProductUsageServiceClient = cloudApi.marketplace.image_product_usage_service.ImageProductUsageServiceClient; export const ClickHouseBackupServiceClient = cloudApi.mdb.clickhouse_backup_service.BackupServiceClient; export const ClickHouseClusterServiceClient = cloudApi.mdb.clickhouse_cluster_service.ClusterServiceClient; @@ -114,9 +140,13 @@ export const MysqlClusterServiceClient = cloudApi.mdb.mysql_cluster_service.Clus export const MysqlDatabaseServiceClient = cloudApi.mdb.mysql_database_service.DatabaseServiceClient; export const MysqlResourcePresetServiceClient = 
cloudApi.mdb.mysql_resource_preset_service.ResourcePresetServiceClient; export const MysqlUserServiceClient = cloudApi.mdb.mysql_user_service.UserServiceClient; +export const OpenSearchBackupServiceClient = cloudApi.mdb.opensearch_backup_service.BackupServiceClient; +export const OpenSearchClusterServiceClient = cloudApi.mdb.opensearch_cluster_service.ClusterServiceClient; +export const OpenSearchResourcePresetServiceClient = cloudApi.mdb.opensearch_resource_preset_service.ResourcePresetServiceClient; export const PgsqlBackupServiceClient = cloudApi.mdb.postgresql_backup_service.BackupServiceClient; export const PgsqlClusterServiceClient = cloudApi.mdb.postgresql_cluster_service.ClusterServiceClient; export const PgsqlDatabaseServiceClient = cloudApi.mdb.postgresql_database_service.DatabaseServiceClient; +export const PgsqlPerformanceDiagnosticsServiceClient = cloudApi.mdb.postgresql_perf_diag_service.PerformanceDiagnosticsServiceClient; export const PgsqlResourcePresetServiceClient = cloudApi.mdb.postgresql_resource_preset_service.ResourcePresetServiceClient; export const PgsqlUserServiceClient = cloudApi.mdb.postgresql_user_service.UserServiceClient; export const RedisBackupServiceClient = cloudApi.mdb.redis_backup_service.BackupServiceClient; @@ -129,8 +159,10 @@ export const SqlServerResourcePresetServiceClient = cloudApi.mdb.sqlserver_resou export const SqlServerUserServiceClient = cloudApi.mdb.sqlserver_user_service.UserServiceClient; export const DashboardServiceClient = cloudApi.monitoring.dashboard_service.DashboardServiceClient; export const OperationServiceClient = cloudApi.operation.operation_service.OperationServiceClient; +export const GroupMappingServiceClient = cloudApi.organizationmanager.group_mapping_service.GroupMappingServiceClient; export const GroupServiceClient = cloudApi.organizationmanager.group_service.GroupServiceClient; export const OrganizationServiceClient = cloudApi.organizationmanager.organization_service.OrganizationServiceClient; 
+export const SshCertificateServiceClient = cloudApi.organizationmanager.ssh_certificate_service.SshCertificateServiceClient; export const UserServiceClient = cloudApi.organizationmanager.user_service.UserServiceClient; export const OmCertificateServiceClient = cloudApi.organizationmanager.certificate_service.CertificateServiceClient; export const FederationServiceClient = cloudApi.organizationmanager.federation_service.FederationServiceClient; @@ -153,4 +185,4 @@ export const YdbBackupServiceClient = cloudApi.ydb.backup_service.BackupServiceC export const YdbDatabaseServiceClient = cloudApi.ydb.database_service.DatabaseServiceClient; export const LocationServiceClient = cloudApi.ydb.location_service.LocationServiceClient; export const YdbResourcePresetServiceClient = cloudApi.ydb.resource_preset_service.ResourcePresetServiceClient; -export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; +export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; \ No newline at end of file diff --git a/src/generated/yandex/cloud/storage/v1/bucket.ts b/src/generated/yandex/cloud/storage/v1/bucket.ts index 7116ec3c..6940a739 100644 --- a/src/generated/yandex/cloud/storage/v1/bucket.ts +++ b/src/generated/yandex/cloud/storage/v1/bucket.ts @@ -143,6 +143,29 @@ export interface Bucket { * For details, see [documentation](/docs/storage/concepts/lifecycles). */ lifecycleRules: LifecycleRule[]; + /** + * List of tags for the bucket. + * For details, see [documentation](/docs/resource-manager/concepts/labels). + */ + tags: Tag[]; + /** + * Configuration for object lock on the bucket. + * For details about the concept, see [documentation](/docs/storage/concepts/object-lock). 
+ */ + objectLock?: ObjectLock; + /** + * Configuration for bucket's encryption + * For details, see [documentation](/docs/storage/concepts/encryption) + */ + encryption?: Encryption; +} + +export interface Tag { + $type: "yandex.cloud.storage.v1.Tag"; + /** Key of the bucket tag. */ + key: string; + /** Value of the bucket tag. */ + value: string; } export interface ACL { @@ -740,6 +763,10 @@ export interface LifecycleRule_RuleFilter { $type: "yandex.cloud.storage.v1.LifecycleRule.RuleFilter"; /** Key prefix that the object must have in order for the rule to apply. */ prefix: string; + /** Size that the object must be greater than. */ + objectSizeGreaterThan?: number; + /** Size that the object must be less than. */ + objectSizeLessThan?: number; } export interface Counters { @@ -902,6 +929,123 @@ export function hTTPSConfig_SourceTypeToJSON( } } +/** + * A resource for Object Lock configuration of a bucket. + * For details about the concept, see [documentation](/docs/storage/concepts/object-lock). 
+ */ +export interface ObjectLock { + $type: "yandex.cloud.storage.v1.ObjectLock"; + status: ObjectLock_ObjectLockStatus; + defaultRetention?: ObjectLock_DefaultRetention; +} + +/** Activity status of the object lock settings on the bucket */ +export enum ObjectLock_ObjectLockStatus { + OBJECT_LOCK_STATUS_UNSPECIFIED = 0, + OBJECT_LOCK_STATUS_DISABLED = 1, + OBJECT_LOCK_STATUS_ENABLED = 2, + UNRECOGNIZED = -1, +} + +export function objectLock_ObjectLockStatusFromJSON( + object: any +): ObjectLock_ObjectLockStatus { + switch (object) { + case 0: + case "OBJECT_LOCK_STATUS_UNSPECIFIED": + return ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_UNSPECIFIED; + case 1: + case "OBJECT_LOCK_STATUS_DISABLED": + return ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_DISABLED; + case 2: + case "OBJECT_LOCK_STATUS_ENABLED": + return ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_ENABLED; + case -1: + case "UNRECOGNIZED": + default: + return ObjectLock_ObjectLockStatus.UNRECOGNIZED; + } +} + +export function objectLock_ObjectLockStatusToJSON( + object: ObjectLock_ObjectLockStatus +): string { + switch (object) { + case ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_UNSPECIFIED: + return "OBJECT_LOCK_STATUS_UNSPECIFIED"; + case ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_DISABLED: + return "OBJECT_LOCK_STATUS_DISABLED"; + case ObjectLock_ObjectLockStatus.OBJECT_LOCK_STATUS_ENABLED: + return "OBJECT_LOCK_STATUS_ENABLED"; + default: + return "UNKNOWN"; + } +} + +/** Default lock configuration for added objects */ +export interface ObjectLock_DefaultRetention { + $type: "yandex.cloud.storage.v1.ObjectLock.DefaultRetention"; + mode: ObjectLock_DefaultRetention_Mode; + /** Number of days for locking */ + days: number | undefined; + /** Number of years for locking */ + years: number | undefined; +} + +/** Lock type */ +export enum ObjectLock_DefaultRetention_Mode { + MODE_UNSPECIFIED = 0, + MODE_GOVERNANCE = 1, + MODE_COMPLIANCE = 2, + UNRECOGNIZED = -1, +} + +export function 
objectLock_DefaultRetention_ModeFromJSON( + object: any +): ObjectLock_DefaultRetention_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return ObjectLock_DefaultRetention_Mode.MODE_UNSPECIFIED; + case 1: + case "MODE_GOVERNANCE": + return ObjectLock_DefaultRetention_Mode.MODE_GOVERNANCE; + case 2: + case "MODE_COMPLIANCE": + return ObjectLock_DefaultRetention_Mode.MODE_COMPLIANCE; + case -1: + case "UNRECOGNIZED": + default: + return ObjectLock_DefaultRetention_Mode.UNRECOGNIZED; + } +} + +export function objectLock_DefaultRetention_ModeToJSON( + object: ObjectLock_DefaultRetention_Mode +): string { + switch (object) { + case ObjectLock_DefaultRetention_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case ObjectLock_DefaultRetention_Mode.MODE_GOVERNANCE: + return "MODE_GOVERNANCE"; + case ObjectLock_DefaultRetention_Mode.MODE_COMPLIANCE: + return "MODE_COMPLIANCE"; + default: + return "UNKNOWN"; + } +} + +export interface Encryption { + $type: "yandex.cloud.storage.v1.Encryption"; + rules: Encryption_EncryptionRule[]; +} + +export interface Encryption_EncryptionRule { + $type: "yandex.cloud.storage.v1.Encryption.EncryptionRule"; + kmsMasterKeyId: string; + sseAlgorithm: string; +} + const baseBucket: object = { $type: "yandex.cloud.storage.v1.Bucket", id: "", @@ -970,6 +1114,15 @@ export const Bucket = { for (const v of message.lifecycleRules) { LifecycleRule.encode(v!, writer.uint32(106).fork()).ldelim(); } + for (const v of message.tags) { + Tag.encode(v!, writer.uint32(114).fork()).ldelim(); + } + if (message.objectLock !== undefined) { + ObjectLock.encode(message.objectLock, writer.uint32(122).fork()).ldelim(); + } + if (message.encryption !== undefined) { + Encryption.encode(message.encryption, writer.uint32(130).fork()).ldelim(); + } return writer; }, @@ -979,6 +1132,7 @@ export const Bucket = { const message = { ...baseBucket } as Bucket; message.cors = []; message.lifecycleRules = []; + message.tags = []; while (reader.pos < end) { 
const tag = reader.uint32(); switch (tag >>> 3) { @@ -1033,6 +1187,15 @@ export const Bucket = { LifecycleRule.decode(reader, reader.uint32()) ); break; + case 14: + message.tags.push(Tag.decode(reader, reader.uint32())); + break; + case 15: + message.objectLock = ObjectLock.decode(reader, reader.uint32()); + break; + case 16: + message.encryption = Encryption.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1089,6 +1252,15 @@ export const Bucket = { message.lifecycleRules = (object.lifecycleRules ?? []).map((e: any) => LifecycleRule.fromJSON(e) ); + message.tags = (object.tags ?? []).map((e: any) => Tag.fromJSON(e)); + message.objectLock = + object.objectLock !== undefined && object.objectLock !== null + ? ObjectLock.fromJSON(object.objectLock) + : undefined; + message.encryption = + object.encryption !== undefined && object.encryption !== null + ? Encryption.fromJSON(object.encryption) + : undefined; return message; }, @@ -1128,6 +1300,19 @@ export const Bucket = { } else { obj.lifecycleRules = []; } + if (message.tags) { + obj.tags = message.tags.map((e) => (e ? Tag.toJSON(e) : undefined)); + } else { + obj.tags = []; + } + message.objectLock !== undefined && + (obj.objectLock = message.objectLock + ? ObjectLock.toJSON(message.objectLock) + : undefined); + message.encryption !== undefined && + (obj.encryption = message.encryption + ? Encryption.toJSON(message.encryption) + : undefined); return obj; }, @@ -1157,12 +1342,89 @@ export const Bucket = { : undefined; message.lifecycleRules = object.lifecycleRules?.map((e) => LifecycleRule.fromPartial(e)) || []; + message.tags = object.tags?.map((e) => Tag.fromPartial(e)) || []; + message.objectLock = + object.objectLock !== undefined && object.objectLock !== null + ? ObjectLock.fromPartial(object.objectLock) + : undefined; + message.encryption = + object.encryption !== undefined && object.encryption !== null + ? 
Encryption.fromPartial(object.encryption) + : undefined; return message; }, }; messageTypeRegistry.set(Bucket.$type, Bucket); +const baseTag: object = { + $type: "yandex.cloud.storage.v1.Tag", + key: "", + value: "", +}; + +export const Tag = { + $type: "yandex.cloud.storage.v1.Tag" as const, + + encode(message: Tag, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Tag { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTag } as Tag; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Tag { + const message = { ...baseTag } as Tag; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Tag): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>(object: I): Tag { + const message = { ...baseTag } as Tag; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Tag.$type, Tag); + const baseACL: object = { $type: "yandex.cloud.storage.v1.ACL" }; export const ACL = { @@ -2828,6 +3090,24 @@ export const LifecycleRule_RuleFilter = { if (message.prefix !== "") { writer.uint32(10).string(message.prefix); } + if (message.objectSizeGreaterThan !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.objectSizeGreaterThan!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.objectSizeLessThan !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.objectSizeLessThan!, + }, + writer.uint32(26).fork() + ).ldelim(); + } return writer; }, @@ -2846,6 +3126,18 @@ export const LifecycleRule_RuleFilter = { case 1: message.prefix = reader.string(); break; + case 2: + message.objectSizeGreaterThan = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.objectSizeLessThan = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2862,12 +3154,26 @@ export const LifecycleRule_RuleFilter = { object.prefix !== undefined && object.prefix !== null ? String(object.prefix) : ""; + message.objectSizeGreaterThan = + object.objectSizeGreaterThan !== undefined && + object.objectSizeGreaterThan !== null + ? Number(object.objectSizeGreaterThan) + : undefined; + message.objectSizeLessThan = + object.objectSizeLessThan !== undefined && + object.objectSizeLessThan !== null + ? 
Number(object.objectSizeLessThan) + : undefined; return message; }, toJSON(message: LifecycleRule_RuleFilter): unknown { const obj: any = {}; message.prefix !== undefined && (obj.prefix = message.prefix); + message.objectSizeGreaterThan !== undefined && + (obj.objectSizeGreaterThan = message.objectSizeGreaterThan); + message.objectSizeLessThan !== undefined && + (obj.objectSizeLessThan = message.objectSizeLessThan); return obj; }, @@ -2878,6 +3184,8 @@ export const LifecycleRule_RuleFilter = { ...baseLifecycleRule_RuleFilter, } as LifecycleRule_RuleFilter; message.prefix = object.prefix ?? ""; + message.objectSizeGreaterThan = object.objectSizeGreaterThan ?? undefined; + message.objectSizeLessThan = object.objectSizeLessThan ?? undefined; return message; }, }; @@ -3687,6 +3995,349 @@ export const HTTPSConfig = { messageTypeRegistry.set(HTTPSConfig.$type, HTTPSConfig); +const baseObjectLock: object = { + $type: "yandex.cloud.storage.v1.ObjectLock", + status: 0, +}; + +export const ObjectLock = { + $type: "yandex.cloud.storage.v1.ObjectLock" as const, + + encode( + message: ObjectLock, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(16).int32(message.status); + } + if (message.defaultRetention !== undefined) { + ObjectLock_DefaultRetention.encode( + message.defaultRetention, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ObjectLock { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseObjectLock } as ObjectLock; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.status = reader.int32() as any; + break; + case 3: + message.defaultRetention = ObjectLock_DefaultRetention.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ObjectLock { + const message = { ...baseObjectLock } as ObjectLock; + message.status = + object.status !== undefined && object.status !== null + ? objectLock_ObjectLockStatusFromJSON(object.status) + : 0; + message.defaultRetention = + object.defaultRetention !== undefined && object.defaultRetention !== null + ? ObjectLock_DefaultRetention.fromJSON(object.defaultRetention) + : undefined; + return message; + }, + + toJSON(message: ObjectLock): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = objectLock_ObjectLockStatusToJSON(message.status)); + message.defaultRetention !== undefined && + (obj.defaultRetention = message.defaultRetention + ? ObjectLock_DefaultRetention.toJSON(message.defaultRetention) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ObjectLock { + const message = { ...baseObjectLock } as ObjectLock; + message.status = object.status ?? 0; + message.defaultRetention = + object.defaultRetention !== undefined && object.defaultRetention !== null + ? 
ObjectLock_DefaultRetention.fromPartial(object.defaultRetention) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ObjectLock.$type, ObjectLock); + +const baseObjectLock_DefaultRetention: object = { + $type: "yandex.cloud.storage.v1.ObjectLock.DefaultRetention", + mode: 0, +}; + +export const ObjectLock_DefaultRetention = { + $type: "yandex.cloud.storage.v1.ObjectLock.DefaultRetention" as const, + + encode( + message: ObjectLock_DefaultRetention, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.days !== undefined) { + writer.uint32(16).int64(message.days); + } + if (message.years !== undefined) { + writer.uint32(24).int64(message.years); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ObjectLock_DefaultRetention { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseObjectLock_DefaultRetention, + } as ObjectLock_DefaultRetention; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.days = longToNumber(reader.int64() as Long); + break; + case 3: + message.years = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ObjectLock_DefaultRetention { + const message = { + ...baseObjectLock_DefaultRetention, + } as ObjectLock_DefaultRetention; + message.mode = + object.mode !== undefined && object.mode !== null + ? objectLock_DefaultRetention_ModeFromJSON(object.mode) + : 0; + message.days = + object.days !== undefined && object.days !== null + ? Number(object.days) + : undefined; + message.years = + object.years !== undefined && object.years !== null + ? 
Number(object.years) + : undefined; + return message; + }, + + toJSON(message: ObjectLock_DefaultRetention): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = objectLock_DefaultRetention_ModeToJSON(message.mode)); + message.days !== undefined && (obj.days = Math.round(message.days)); + message.years !== undefined && (obj.years = Math.round(message.years)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ObjectLock_DefaultRetention { + const message = { + ...baseObjectLock_DefaultRetention, + } as ObjectLock_DefaultRetention; + message.mode = object.mode ?? 0; + message.days = object.days ?? undefined; + message.years = object.years ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ObjectLock_DefaultRetention.$type, + ObjectLock_DefaultRetention +); + +const baseEncryption: object = { $type: "yandex.cloud.storage.v1.Encryption" }; + +export const Encryption = { + $type: "yandex.cloud.storage.v1.Encryption" as const, + + encode( + message: Encryption, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.rules) { + Encryption_EncryptionRule.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Encryption { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEncryption } as Encryption; + message.rules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rules.push( + Encryption_EncryptionRule.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Encryption { + const message = { ...baseEncryption } as Encryption; + message.rules = (object.rules ?? 
[]).map((e: any) => + Encryption_EncryptionRule.fromJSON(e) + ); + return message; + }, + + toJSON(message: Encryption): unknown { + const obj: any = {}; + if (message.rules) { + obj.rules = message.rules.map((e) => + e ? Encryption_EncryptionRule.toJSON(e) : undefined + ); + } else { + obj.rules = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Encryption { + const message = { ...baseEncryption } as Encryption; + message.rules = + object.rules?.map((e) => Encryption_EncryptionRule.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Encryption.$type, Encryption); + +const baseEncryption_EncryptionRule: object = { + $type: "yandex.cloud.storage.v1.Encryption.EncryptionRule", + kmsMasterKeyId: "", + sseAlgorithm: "", +}; + +export const Encryption_EncryptionRule = { + $type: "yandex.cloud.storage.v1.Encryption.EncryptionRule" as const, + + encode( + message: Encryption_EncryptionRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.kmsMasterKeyId !== "") { + writer.uint32(10).string(message.kmsMasterKeyId); + } + if (message.sseAlgorithm !== "") { + writer.uint32(18).string(message.sseAlgorithm); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Encryption_EncryptionRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseEncryption_EncryptionRule, + } as Encryption_EncryptionRule; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kmsMasterKeyId = reader.string(); + break; + case 2: + message.sseAlgorithm = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Encryption_EncryptionRule { + const message = { + ...baseEncryption_EncryptionRule, + } as Encryption_EncryptionRule; + message.kmsMasterKeyId = + object.kmsMasterKeyId !== undefined && object.kmsMasterKeyId !== null + ? String(object.kmsMasterKeyId) + : ""; + message.sseAlgorithm = + object.sseAlgorithm !== undefined && object.sseAlgorithm !== null + ? String(object.sseAlgorithm) + : ""; + return message; + }, + + toJSON(message: Encryption_EncryptionRule): unknown { + const obj: any = {}; + message.kmsMasterKeyId !== undefined && + (obj.kmsMasterKeyId = message.kmsMasterKeyId); + message.sseAlgorithm !== undefined && + (obj.sseAlgorithm = message.sseAlgorithm); + return obj; + }, + + fromPartial, I>>( + object: I + ): Encryption_EncryptionRule { + const message = { + ...baseEncryption_EncryptionRule, + } as Encryption_EncryptionRule; + message.kmsMasterKeyId = object.kmsMasterKeyId ?? ""; + message.sseAlgorithm = object.sseAlgorithm ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + Encryption_EncryptionRule.$type, + Encryption_EncryptionRule +); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/storage/v1/bucket_service.ts b/src/generated/yandex/cloud/storage/v1/bucket_service.ts index cdf8d935..c503f5d8 100644 --- a/src/generated/yandex/cloud/storage/v1/bucket_service.ts +++ b/src/generated/yandex/cloud/storage/v1/bucket_service.ts @@ -19,7 +19,10 @@ import { ACL, WebsiteSettings, Versioning, + ObjectLock, + Encryption, Bucket, + Tag, CorsRule, LifecycleRule, BucketStats, @@ -56,13 +59,14 @@ export enum GetBucketRequest_View { * VIEW_BASIC - Returns basic information about a bucket. * * The following fields will _not_ be returned: [Bucket.acl], [Bucket.cors], [Bucket.website_settings], - * [Bucket.lifecycle_rules]. + * [Bucket.lifecycle_rules], [Bucket.tags]. */ VIEW_BASIC = 1, /** * VIEW_ACL - Returns basic information and access control list (ACL) for the bucket. * - * The following fields will _not_ be returned: [Bucket.cors], [Bucket.website_settings], [Bucket.lifecycle_rules]. + * The following fields will _not_ be returned: [Bucket.cors], [Bucket.website_settings], [Bucket.lifecycle_rules], + * [Bucket.tags]. */ VIEW_ACL = 2, /** VIEW_FULL - Returns full information about a bucket. */ @@ -162,6 +166,11 @@ export interface CreateBucketRequest { * For details, see [documentation](/docs/storage/concepts/acl). */ acl?: ACL; + /** + * List of tags for the bucket. + * For details, see [documentation](/docs/resource-manager/concepts/labels). + */ + tags: Tag[]; } export interface CreateBucketMetadata { @@ -180,8 +189,11 @@ export interface UpdateBucketRequest { * To get the bucket name, make a [BucketService.List] request. */ name: string; - /** Field mask that specifies which attributes of the bucket should be updated. 
 */ - fieldMask?: FieldMask; + /** + * Update mask that specifies which attributes of the bucket should be updated. + * Use * for full update. + */ + updateMask?: FieldMask; /** * Flags for configuring public (anonymous) access to the bucket's content and settings. * For details, see [documentation](/docs/storage/concepts/bucket#bucket-access). @@ -228,6 +240,21 @@ export interface UpdateBucketRequest { * For details, see [documentation](/docs/storage/concepts/acl). */ acl?: ACL; + /** + * List of tags for the bucket. + * For details, see [documentation](/docs/resource-manager/concepts/labels). + */ + tags: Tag[]; + /** + * Configuration for object lock on the bucket. + * For details about the concept, see [documentation](/docs/storage/concepts/object-lock). + */ + objectLock?: ObjectLock; + /** + * Configuration for bucket's encryption + * For details, see [documentation](/docs/storage/concepts/encryption) + */ + encryption?: Encryption; } export interface UpdateBucketMetadata { @@ -563,6 +590,9 @@ export const CreateBucketRequest = { if (message.acl !== undefined) { ACL.encode(message.acl, writer.uint32(58).fork()).ldelim(); } + for (const v of message.tags) { + Tag.encode(v!, writer.uint32(66).fork()).ldelim(); + } return writer; }, @@ -570,6 +600,7 @@ export const CreateBucketRequest = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseCreateBucketRequest } as CreateBucketRequest; + message.tags = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -594,6 +625,9 @@ export const CreateBucketRequest = { case 7: message.acl = ACL.decode(reader, reader.uint32()); break; + case 8: + message.tags.push(Tag.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -630,6 +664,7 @@ export const CreateBucketRequest = { object.acl !== undefined && object.acl !== null ? 
ACL.fromJSON(object.acl) : undefined; + message.tags = (object.tags ?? []).map((e: any) => Tag.fromJSON(e)); return message; }, @@ -647,6 +682,11 @@ export const CreateBucketRequest = { : undefined); message.acl !== undefined && (obj.acl = message.acl ? ACL.toJSON(message.acl) : undefined); + if (message.tags) { + obj.tags = message.tags.map((e) => (e ? Tag.toJSON(e) : undefined)); + } else { + obj.tags = []; + } return obj; }, @@ -667,6 +707,7 @@ export const CreateBucketRequest = { object.acl !== undefined && object.acl !== null ? ACL.fromPartial(object.acl) : undefined; + message.tags = object.tags?.map((e) => Tag.fromPartial(e)) || []; return message; }, }; @@ -756,8 +797,8 @@ export const UpdateBucketRequest = { if (message.name !== "") { writer.uint32(10).string(message.name); } - if (message.fieldMask !== undefined) { - FieldMask.encode(message.fieldMask, writer.uint32(18).fork()).ldelim(); + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); } if (message.anonymousAccessFlags !== undefined) { AnonymousAccessFlags.encode( @@ -795,6 +836,15 @@ export const UpdateBucketRequest = { if (message.acl !== undefined) { ACL.encode(message.acl, writer.uint32(90).fork()).ldelim(); } + for (const v of message.tags) { + Tag.encode(v!, writer.uint32(98).fork()).ldelim(); + } + if (message.objectLock !== undefined) { + ObjectLock.encode(message.objectLock, writer.uint32(106).fork()).ldelim(); + } + if (message.encryption !== undefined) { + Encryption.encode(message.encryption, writer.uint32(114).fork()).ldelim(); + } return writer; }, @@ -804,6 +854,7 @@ export const UpdateBucketRequest = { const message = { ...baseUpdateBucketRequest } as UpdateBucketRequest; message.cors = []; message.lifecycleRules = []; + message.tags = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -811,7 +862,7 @@ export const UpdateBucketRequest = { message.name = reader.string(); break; case 2: - 
message.fieldMask = FieldMask.decode(reader, reader.uint32()); + message.updateMask = FieldMask.decode(reader, reader.uint32()); break; case 3: message.anonymousAccessFlags = AnonymousAccessFlags.decode( @@ -850,6 +901,15 @@ export const UpdateBucketRequest = { case 11: message.acl = ACL.decode(reader, reader.uint32()); break; + case 12: + message.tags.push(Tag.decode(reader, reader.uint32())); + break; + case 13: + message.objectLock = ObjectLock.decode(reader, reader.uint32()); + break; + case 14: + message.encryption = Encryption.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -864,9 +924,9 @@ export const UpdateBucketRequest = { object.name !== undefined && object.name !== null ? String(object.name) : ""; - message.fieldMask = - object.fieldMask !== undefined && object.fieldMask !== null - ? FieldMask.fromJSON(object.fieldMask) + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) : undefined; message.anonymousAccessFlags = object.anonymousAccessFlags !== undefined && @@ -900,15 +960,24 @@ export const UpdateBucketRequest = { object.acl !== undefined && object.acl !== null ? ACL.fromJSON(object.acl) : undefined; + message.tags = (object.tags ?? []).map((e: any) => Tag.fromJSON(e)); + message.objectLock = + object.objectLock !== undefined && object.objectLock !== null + ? ObjectLock.fromJSON(object.objectLock) + : undefined; + message.encryption = + object.encryption !== undefined && object.encryption !== null + ? Encryption.fromJSON(object.encryption) + : undefined; return message; }, toJSON(message: UpdateBucketRequest): unknown { const obj: any = {}; message.name !== undefined && (obj.name = message.name); - message.fieldMask !== undefined && - (obj.fieldMask = message.fieldMask - ? FieldMask.toJSON(message.fieldMask) + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
FieldMask.toJSON(message.updateMask) : undefined); message.anonymousAccessFlags !== undefined && (obj.anonymousAccessFlags = message.anonymousAccessFlags @@ -939,6 +1008,19 @@ export const UpdateBucketRequest = { message.policy !== undefined && (obj.policy = message.policy); message.acl !== undefined && (obj.acl = message.acl ? ACL.toJSON(message.acl) : undefined); + if (message.tags) { + obj.tags = message.tags.map((e) => (e ? Tag.toJSON(e) : undefined)); + } else { + obj.tags = []; + } + message.objectLock !== undefined && + (obj.objectLock = message.objectLock + ? ObjectLock.toJSON(message.objectLock) + : undefined); + message.encryption !== undefined && + (obj.encryption = message.encryption + ? Encryption.toJSON(message.encryption) + : undefined); return obj; }, @@ -947,9 +1029,9 @@ export const UpdateBucketRequest = { ): UpdateBucketRequest { const message = { ...baseUpdateBucketRequest } as UpdateBucketRequest; message.name = object.name ?? ""; - message.fieldMask = - object.fieldMask !== undefined && object.fieldMask !== null - ? FieldMask.fromPartial(object.fieldMask) + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) : undefined; message.anonymousAccessFlags = object.anonymousAccessFlags !== undefined && @@ -971,6 +1053,15 @@ export const UpdateBucketRequest = { object.acl !== undefined && object.acl !== null ? ACL.fromPartial(object.acl) : undefined; + message.tags = object.tags?.map((e) => Tag.fromPartial(e)) || []; + message.objectLock = + object.objectLock !== undefined && object.objectLock !== null + ? ObjectLock.fromPartial(object.objectLock) + : undefined; + message.encryption = + object.encryption !== undefined && object.encryption !== null + ? Encryption.fromPartial(object.encryption) + : undefined; return message; }, }; @@ -1826,7 +1917,7 @@ export const BucketServiceService = { * Retrieves the list of buckets in the specified folder. 
* * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], - * [Bucket.website_settings], [Bucket.lifecycle_rules]. + * [Bucket.website_settings], [Bucket.lifecycle_rules], [Bucket.tags]. */ list: { path: "/yandex.cloud.storage.v1.BucketService/List", @@ -1958,7 +2049,7 @@ export interface BucketServiceServer extends UntypedServiceImplementation { * Retrieves the list of buckets in the specified folder. * * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], - * [Bucket.website_settings], [Bucket.lifecycle_rules]. + * [Bucket.website_settings], [Bucket.lifecycle_rules], [Bucket.tags]. */ list: handleUnaryCall; /** @@ -1997,7 +2088,7 @@ export interface BucketServiceClient extends Client { * Retrieves the list of buckets in the specified folder. * * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], - * [Bucket.website_settings], [Bucket.lifecycle_rules]. + * [Bucket.website_settings], [Bucket.lifecycle_rules], [Bucket.tags]. */ list( request: ListBucketsRequest, diff --git a/src/generated/yandex/cloud/vpc/v1/address.ts b/src/generated/yandex/cloud/vpc/v1/address.ts index 750808fd..e0853e9a 100644 --- a/src/generated/yandex/cloud/vpc/v1/address.ts +++ b/src/generated/yandex/cloud/vpc/v1/address.ts @@ -33,6 +33,8 @@ export interface Address { type: Address_Type; /** Vervion of the IP address. */ ipVersion: Address_IpVersion; + /** Specifies if address protected from deletion. 
*/ + deletionProtection: boolean; } export enum Address_Type { @@ -149,6 +151,7 @@ const baseAddress: object = { used: false, type: 0, ipVersion: 0, + deletionProtection: false, }; export const Address = { @@ -204,6 +207,9 @@ export const Address = { if (message.ipVersion !== 0) { writer.uint32(144).int32(message.ipVersion); } + if (message.deletionProtection === true) { + writer.uint32(152).bool(message.deletionProtection); + } return writer; }, @@ -256,6 +262,9 @@ export const Address = { case 18: message.ipVersion = reader.int32() as any; break; + case 19: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -311,6 +320,11 @@ export const Address = { object.ipVersion !== undefined && object.ipVersion !== null ? address_IpVersionFromJSON(object.ipVersion) : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -338,6 +352,8 @@ export const Address = { message.type !== undefined && (obj.type = address_TypeToJSON(message.type)); message.ipVersion !== undefined && (obj.ipVersion = address_IpVersionToJSON(message.ipVersion)); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -365,6 +381,7 @@ export const Address = { message.used = object.used ?? false; message.type = object.type ?? 0; message.ipVersion = object.ipVersion ?? 0; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/vpc/v1/address_service.ts b/src/generated/yandex/cloud/vpc/v1/address_service.ts index 6a0535e6..6281757e 100644 --- a/src/generated/yandex/cloud/vpc/v1/address_service.ts +++ b/src/generated/yandex/cloud/vpc/v1/address_service.ts @@ -102,6 +102,8 @@ export interface CreateAddressRequest { /** Address labels as `key:value` pairs. 
*/ labels: { [key: string]: string }; externalIpv4AddressSpec?: ExternalIpv4AddressSpec | undefined; + /** Specifies if address protected from deletion. */ + deletionProtection: boolean; } export interface CreateAddressRequest_LabelsEntry { @@ -155,6 +157,8 @@ export interface UpdateAddressRequest { labels: { [key: string]: string }; /** Specifies if address is reserved or not. */ reserved: boolean; + /** Specifies if address protected from deletion. */ + deletionProtection: boolean; } export interface UpdateAddressRequest_LabelsEntry { @@ -564,6 +568,7 @@ const baseCreateAddressRequest: object = { folderId: "", name: "", description: "", + deletionProtection: false, }; export const CreateAddressRequest = { @@ -598,6 +603,9 @@ export const CreateAddressRequest = { writer.uint32(42).fork() ).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(80).bool(message.deletionProtection); + } return writer; }, @@ -636,6 +644,9 @@ export const CreateAddressRequest = { reader.uint32() ); break; + case 10: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -669,6 +680,11 @@ export const CreateAddressRequest = { object.externalIpv4AddressSpec !== null ? ExternalIpv4AddressSpec.fromJSON(object.externalIpv4AddressSpec) : undefined; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -688,6 +704,8 @@ export const CreateAddressRequest = { (obj.externalIpv4AddressSpec = message.externalIpv4AddressSpec ? ExternalIpv4AddressSpec.toJSON(message.externalIpv4AddressSpec) : undefined); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -711,6 +729,7 @@ export const CreateAddressRequest = { object.externalIpv4AddressSpec !== null ? 
ExternalIpv4AddressSpec.fromPartial(object.externalIpv4AddressSpec) : undefined; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -981,6 +1000,7 @@ const baseUpdateAddressRequest: object = { name: "", description: "", reserved: false, + deletionProtection: false, }; export const UpdateAddressRequest = { @@ -1015,6 +1035,9 @@ export const UpdateAddressRequest = { if (message.reserved === true) { writer.uint32(48).bool(message.reserved); } + if (message.deletionProtection === true) { + writer.uint32(56).bool(message.deletionProtection); + } return writer; }, @@ -1053,6 +1076,9 @@ export const UpdateAddressRequest = { case 6: message.reserved = reader.bool(); break; + case 7: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1089,6 +1115,11 @@ export const UpdateAddressRequest = { object.reserved !== undefined && object.reserved !== null ? Boolean(object.reserved) : false; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -1109,6 +1140,8 @@ export const UpdateAddressRequest = { }); } message.reserved !== undefined && (obj.reserved = message.reserved); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -1132,6 +1165,7 @@ export const UpdateAddressRequest = { return acc; }, {}); message.reserved = object.reserved ?? false; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/vpc/v1/gateway.ts b/src/generated/yandex/cloud/vpc/v1/gateway.ts index 254e9774..b3bd5dd3 100644 --- a/src/generated/yandex/cloud/vpc/v1/gateway.ts +++ b/src/generated/yandex/cloud/vpc/v1/gateway.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.vpc.v1"; -/** A Gateway resource. For more information, see [Gateway](/docs/vpc/concepts/gateway). */ +/** A Gateway resource. For more information, see [Gateway](/docs/vpc/concepts/gateways). */ export interface Gateway { $type: "yandex.cloud.vpc.v1.Gateway"; /** ID of the gateway. Generated at creation time. */ diff --git a/src/generated/yandex/cloud/vpc/v1/subnet.ts b/src/generated/yandex/cloud/vpc/v1/subnet.ts index f6e66060..2fdb4b27 100644 --- a/src/generated/yandex/cloud/vpc/v1/subnet.ts +++ b/src/generated/yandex/cloud/vpc/v1/subnet.ts @@ -44,7 +44,7 @@ export function ipVersionToJSON(object: IpVersion): string { } } -/** A Subnet resource. For more information, see [Subnets](/docs/vpc/concepts/subnets). */ +/** A Subnet resource. For more information, see [Subnets](/docs/vpc/concepts/network#subnet). */ export interface Subnet { $type: "yandex.cloud.vpc.v1.Subnet"; /** ID of the subnet. 
*/ diff --git a/src/generated/yandex/cloud/ydb/v1/database.ts b/src/generated/yandex/cloud/ydb/v1/database.ts index bfc10595..b7af1f79 100644 --- a/src/generated/yandex/cloud/ydb/v1/database.ts +++ b/src/generated/yandex/cloud/ydb/v1/database.ts @@ -94,6 +94,7 @@ export interface Database { backupConfig?: BackupConfig; documentApiEndpoint: string; kinesisApiEndpoint: string; + kafkaApiEndpoint: string; monitoringConfig?: MonitoringConfig; deletionProtection: boolean; } @@ -333,6 +334,7 @@ const baseDatabase: object = { locationId: "", documentApiEndpoint: "", kinesisApiEndpoint: "", + kafkaApiEndpoint: "", deletionProtection: false, }; @@ -440,6 +442,9 @@ export const Database = { if (message.kinesisApiEndpoint !== "") { writer.uint32(186).string(message.kinesisApiEndpoint); } + if (message.kafkaApiEndpoint !== "") { + writer.uint32(210).string(message.kafkaApiEndpoint); + } if (message.monitoringConfig !== undefined) { MonitoringConfig.encode( message.monitoringConfig, @@ -541,6 +546,9 @@ export const Database = { case 23: message.kinesisApiEndpoint = reader.string(); break; + case 26: + message.kafkaApiEndpoint = reader.string(); + break; case 24: message.monitoringConfig = MonitoringConfig.decode( reader, @@ -649,6 +657,10 @@ export const Database = { object.kinesisApiEndpoint !== null ? String(object.kinesisApiEndpoint) : ""; + message.kafkaApiEndpoint = + object.kafkaApiEndpoint !== undefined && object.kafkaApiEndpoint !== null + ? String(object.kafkaApiEndpoint) + : ""; message.monitoringConfig = object.monitoringConfig !== undefined && object.monitoringConfig !== null ? 
MonitoringConfig.fromJSON(object.monitoringConfig) @@ -722,6 +734,8 @@ export const Database = { (obj.documentApiEndpoint = message.documentApiEndpoint); message.kinesisApiEndpoint !== undefined && (obj.kinesisApiEndpoint = message.kinesisApiEndpoint); + message.kafkaApiEndpoint !== undefined && + (obj.kafkaApiEndpoint = message.kafkaApiEndpoint); message.monitoringConfig !== undefined && (obj.monitoringConfig = message.monitoringConfig ? MonitoringConfig.toJSON(message.monitoringConfig) @@ -785,6 +799,7 @@ export const Database = { : undefined; message.documentApiEndpoint = object.documentApiEndpoint ?? ""; message.kinesisApiEndpoint = object.kinesisApiEndpoint ?? ""; + message.kafkaApiEndpoint = object.kafkaApiEndpoint ?? ""; message.monitoringConfig = object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromPartial(object.monitoringConfig) From 87fb6224b9f8696a5d7cdd417faf364e13ef46f5 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 15:55:47 +0300 Subject: [PATCH 07/12] fixup! 
feat: update cloudapi --- scripts/services.ts | 42 ++++++++--------- src/generated/yandex/cloud/service_clients.ts | 46 +++++++++---------- 2 files changed, 43 insertions(+), 45 deletions(-) diff --git a/scripts/services.ts b/scripts/services.ts index 103a9bff..9a0904a8 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -19,10 +19,10 @@ export const servicesConfig: ServicesConfig = { resource_service: { importClassName: 'ResourceServiceClient' }, }, backup: { - backup_service: { importClassName: 'BackupServiceClient' }, - policy_service: { importClassName: 'PolicyServiceClient' }, - provider_service: { importClassName: 'ProviderServiceClient' }, - resource_service: { importClassName: 'ResourceServiceClient' }, + backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'BackupBackupServiceClient' }, + policy_service: { importClassName: 'PolicyServiceClient', exportClassName: 'BackupPolicyServiceClient' }, + provider_service: { importClassName: 'ProviderServiceClient', exportClassName: 'BackupProviderServiceClient' }, + resource_service: { importClassName: 'ResourceServiceClient', exportClassName: 'BackupResourceServiceClient' }, }, billing: { billing_account_service: { importClassName: 'BillingAccountServiceClient' }, @@ -32,12 +32,12 @@ export const servicesConfig: ServicesConfig = { sku_service: { importClassName: 'SkuServiceClient' }, }, cdn: { - cache_service: { importClassName: 'CacheServiceClient' }, - origin_group_service: { importClassName: 'OriginGroupServiceClient' }, - origin_service: { importClassName: 'OriginServiceClient' }, - provider_service: { importClassName: 'ProviderServiceClient' }, - resource_service: { importClassName: 'ResourceServiceClient' }, - raw_logs_service: { importClassName: 'RawLogsServiceClient' }, + cache_service: { importClassName: 'CacheServiceClient', exportClassName: 'CDNCacheServiceClient' }, + origin_group_service: { importClassName: 'OriginGroupServiceClient', exportClassName: 
'CDNOriginGroupServiceClient' }, + origin_service: { importClassName: 'OriginServiceClient', exportClassName: 'CDNOriginServiceClient' }, + provider_service: { importClassName: 'ProviderServiceClient', exportClassName: 'CDNProviderServiceClient' }, + resource_service: { importClassName: 'ResourceServiceClient', exportClassName: 'CDNResourceServiceClient' }, + raw_logs_service: { importClassName: 'RawLogsServiceClient', exportClassName: 'CDNRawLogsServiceClient' }, }, certificatemanager: { certificate_content_service: { importClassName: 'CertificateContentServiceClient' }, @@ -124,13 +124,13 @@ export const servicesConfig: ServicesConfig = { }, loadtesting: { - agent_agent_registration_service: { importClassName: 'AgentRegistrationServiceClient' }, - agent_service: { importClassName: 'AgentServiceClient' }, - agent_job_service: { importClassName: 'JobServiceClient' }, - agent_monitoring_service: { importClassName: 'MonitoringServiceClient' }, - agent_test_service: { importClassName: 'TestServiceClient' }, - agent_trail_service: { importClassName: 'TrailServiceClient' }, - api_agent_service: { importClassName: 'AgentServiceClient' }, + agent_agent_registration_service: { importClassName: 'AgentRegistrationServiceClient', exportClassName: 'LoadTestingAgentRegistrationServiceClient' }, + agent_service: { importClassName: 'AgentServiceClient', exportClassName: 'LoadTestingAgentServiceClient' }, + agent_job_service: { importClassName: 'JobServiceClient', exportClassName: 'LoadTestingJobServiceClient' }, + agent_monitoring_service: { importClassName: 'MonitoringServiceClient', exportClassName: 'LoadTestingMonitoringServiceClient' }, + agent_test_service: { importClassName: 'TestServiceClient', exportClassName: 'LoadTestingTestServiceClient' }, + agent_trail_service: { importClassName: 'TrailServiceClient', exportClassName: 'LoadTestingTrailServiceClient' }, + api_agent_service: { importClassName: 'AgentServiceClient', exportClassName: 'LoadTestingApiAgentServiceClient' }, 
}, loadbalancer: { network_load_balancer_service: { importClassName: 'NetworkLoadBalancerServiceClient' }, @@ -148,10 +148,10 @@ export const servicesConfig: ServicesConfig = { sink_service: { importClassName: 'SinkServiceClient' }, }, marketplace: { - image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient' }, - licensemanager_instance_service: { importClassName: 'InstanceServiceClient' }, - licensemanager_lock_service: { importClassName: 'LockServiceClient' }, - metering_image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient' }, + image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient', exportClassName: 'MarketplaceImageProductUsageServiceClient' }, + licensemanager_instance_service: { importClassName: 'InstanceServiceClient', exportClassName: 'MarketplaceInstanceServiceClient' }, + licensemanager_lock_service: { importClassName: 'LockServiceClient', exportClassName: 'MarketplaceLockServiceClient' }, + metering_image_product_usage_service: { importClassName: 'ImageProductUsageServiceClient', exportClassName: 'MarketplaceMeteringImageProductUsageServiceClient' }, }, mdb: { clickhouse_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'ClickHouseBackupServiceClient' }, diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index adb79e1e..9df95ca0 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -1,7 +1,6 @@ import * as cloudApi from '.' 
export const TextRecognitionServiceClient = cloudApi.ai.ocr_service.TextRecognitionServiceClient; export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; -export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; export const TranslationServiceClient = cloudApi.ai.translate_translation_service.TranslationServiceClient; export const SynthesizerClient = cloudApi.ai.tts_service.SynthesizerClient; export const VisionServiceClient = cloudApi.ai.vision_service.VisionServiceClient; @@ -11,21 +10,21 @@ export const HttpRouterServiceClient = cloudApi.apploadbalancer.http_router_serv export const LoadBalancerServiceClient = cloudApi.apploadbalancer.load_balancer_service.LoadBalancerServiceClient; export const AlbTargetGroupServiceClient = cloudApi.apploadbalancer.target_group_service.TargetGroupServiceClient; export const VirtualHostServiceClient = cloudApi.apploadbalancer.virtual_host_service.VirtualHostServiceClient; -export const BackupServiceClient = cloudApi.backup.backup_service.BackupServiceClient; -export const PolicyServiceClient = cloudApi.backup.policy_service.PolicyServiceClient; -export const ProviderServiceClient = cloudApi.backup.provider_service.ProviderServiceClient; -export const ResourceServiceClient = cloudApi.backup.resource_service.ResourceServiceClient; +export const BackupBackupServiceClient = cloudApi.backup.backup_service.BackupServiceClient; +export const BackupPolicyServiceClient = cloudApi.backup.policy_service.PolicyServiceClient; +export const BackupProviderServiceClient = cloudApi.backup.provider_service.ProviderServiceClient; +export const BackupResourceServiceClient = cloudApi.backup.resource_service.ResourceServiceClient; export const BillingAccountServiceClient = cloudApi.billing.billing_account_service.BillingAccountServiceClient; export const BudgetServiceClient = cloudApi.billing.budget_service.BudgetServiceClient; export const CustomerServiceClient = 
cloudApi.billing.customer_service.CustomerServiceClient; export const ServiceServiceClient = cloudApi.billing.service_service.ServiceServiceClient; export const SkuServiceClient = cloudApi.billing.sku_service.SkuServiceClient; -export const CacheServiceClient = cloudApi.cdn.cache_service.CacheServiceClient; -export const OriginGroupServiceClient = cloudApi.cdn.origin_group_service.OriginGroupServiceClient; -export const OriginServiceClient = cloudApi.cdn.origin_service.OriginServiceClient; -export const ProviderServiceClient = cloudApi.cdn.provider_service.ProviderServiceClient; -export const RawLogsServiceClient = cloudApi.cdn.raw_logs_service.RawLogsServiceClient; -export const ResourceServiceClient = cloudApi.cdn.resource_service.ResourceServiceClient; +export const CDNCacheServiceClient = cloudApi.cdn.cache_service.CacheServiceClient; +export const CDNOriginGroupServiceClient = cloudApi.cdn.origin_group_service.OriginGroupServiceClient; +export const CDNOriginServiceClient = cloudApi.cdn.origin_service.OriginServiceClient; +export const CDNProviderServiceClient = cloudApi.cdn.provider_service.ProviderServiceClient; +export const CDNRawLogsServiceClient = cloudApi.cdn.raw_logs_service.RawLogsServiceClient; +export const CDNResourceServiceClient = cloudApi.cdn.resource_service.ResourceServiceClient; export const CertificateContentServiceClient = cloudApi.certificatemanager.certificate_content_service.CertificateContentServiceClient; export const CertificateServiceClient = cloudApi.certificatemanager.certificate_service.CertificateServiceClient; export const DiskPlacementGroupServiceClient = cloudApi.compute.disk_placement_group_service.DiskPlacementGroupServiceClient; @@ -60,7 +59,6 @@ export const NodeServiceClient = cloudApi.datasphere.node_service.NodeServiceCli export const ProjectDataServiceClient = cloudApi.datasphere.project_data_service.ProjectDataServiceClient; export const ProjectServiceClient = cloudApi.datasphere.project_service.ProjectServiceClient; 
export const CommunityServiceClient = cloudApi.datasphere.community_service.CommunityServiceClient; -export const ProjectServiceClient = cloudApi.datasphere.project_service.ProjectServiceClient; export const EndpointServiceClient = cloudApi.datatransfer.endpoint_service.EndpointServiceClient; export const TransferServiceClient = cloudApi.datatransfer.transfer_service.TransferServiceClient; export const DnsZoneServiceClient = cloudApi.dns.dns_zone_service.DnsZoneServiceClient; @@ -90,13 +88,13 @@ export const AsymmetricSignatureCryptoServiceClient = cloudApi.kms.asymmetric_si export const AsymmetricSignatureKeyServiceClient = cloudApi.kms.asymmetric_signature_key_service.AsymmetricSignatureKeyServiceClient; export const NetworkLoadBalancerServiceClient = cloudApi.loadbalancer.network_load_balancer_service.NetworkLoadBalancerServiceClient; export const TargetGroupServiceClient = cloudApi.loadbalancer.target_group_service.TargetGroupServiceClient; -export const AgentRegistrationServiceClient = cloudApi.loadtesting.agent_agent_registration_service.AgentRegistrationServiceClient; -export const AgentServiceClient = cloudApi.loadtesting.agent_service.AgentServiceClient; -export const JobServiceClient = cloudApi.loadtesting.agent_job_service.JobServiceClient; -export const MonitoringServiceClient = cloudApi.loadtesting.agent_monitoring_service.MonitoringServiceClient; -export const TestServiceClient = cloudApi.loadtesting.agent_test_service.TestServiceClient; -export const TrailServiceClient = cloudApi.loadtesting.agent_trail_service.TrailServiceClient; -export const AgentServiceClient = cloudApi.loadtesting.api_agent_service.AgentServiceClient; +export const LoadTestingAgentRegistrationServiceClient = cloudApi.loadtesting.agent_agent_registration_service.AgentRegistrationServiceClient; +export const LoadTestingAgentServiceClient = cloudApi.loadtesting.agent_service.AgentServiceClient; +export const LoadTestingJobServiceClient = 
cloudApi.loadtesting.agent_job_service.JobServiceClient; +export const LoadTestingMonitoringServiceClient = cloudApi.loadtesting.agent_monitoring_service.MonitoringServiceClient; +export const LoadTestingTestServiceClient = cloudApi.loadtesting.agent_test_service.TestServiceClient; +export const LoadTestingTrailServiceClient = cloudApi.loadtesting.agent_trail_service.TrailServiceClient; +export const LoadTestingApiAgentServiceClient = cloudApi.loadtesting.api_agent_service.AgentServiceClient; export const PayloadServiceClient = cloudApi.lockbox.payload_service.PayloadServiceClient; export const SecretServiceClient = cloudApi.lockbox.secret_service.SecretServiceClient; export const ExportServiceClient = cloudApi.logging.export_service.ExportServiceClient; @@ -104,10 +102,10 @@ export const LogGroupServiceClient = cloudApi.logging.log_group_service.LogGroup export const LogIngestionServiceClient = cloudApi.logging.log_ingestion_service.LogIngestionServiceClient; export const LogReadingServiceClient = cloudApi.logging.log_reading_service.LogReadingServiceClient; export const SinkServiceClient = cloudApi.logging.sink_service.SinkServiceClient; -export const InstanceServiceClient = cloudApi.marketplace.licensemanager_instance_service.InstanceServiceClient; -export const LockServiceClient = cloudApi.marketplace.licensemanager_lock_service.LockServiceClient; -export const ImageProductUsageServiceClient = cloudApi.marketplace.metering_image_product_usage_service.ImageProductUsageServiceClient; -export const ImageProductUsageServiceClient = cloudApi.marketplace.image_product_usage_service.ImageProductUsageServiceClient; +export const MarketplaceInstanceServiceClient = cloudApi.marketplace.licensemanager_instance_service.InstanceServiceClient; +export const MarketplaceLockServiceClient = cloudApi.marketplace.licensemanager_lock_service.LockServiceClient; +export const MarketplaceMeteringImageProductUsageServiceClient = 
cloudApi.marketplace.metering_image_product_usage_service.ImageProductUsageServiceClient; +export const MarketplaceImageProductUsageServiceClient = cloudApi.marketplace.image_product_usage_service.ImageProductUsageServiceClient; export const ClickHouseBackupServiceClient = cloudApi.mdb.clickhouse_backup_service.BackupServiceClient; export const ClickHouseClusterServiceClient = cloudApi.mdb.clickhouse_cluster_service.ClusterServiceClient; export const DatabaseServiceClient = cloudApi.mdb.clickhouse_database_service.DatabaseServiceClient; @@ -185,4 +183,4 @@ export const YdbBackupServiceClient = cloudApi.ydb.backup_service.BackupServiceC export const YdbDatabaseServiceClient = cloudApi.ydb.database_service.DatabaseServiceClient; export const LocationServiceClient = cloudApi.ydb.location_service.LocationServiceClient; export const YdbResourcePresetServiceClient = cloudApi.ydb.resource_preset_service.ResourcePresetServiceClient; -export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; \ No newline at end of file +export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; From d3ce81da9c863102915e5319f9e28aa3e7b8823b Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 16:57:30 +0300 Subject: [PATCH 08/12] fix: added missing endpoints --- src/service-endpoints.ts | 54 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 9d426654..009a0e27 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -17,6 +17,15 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ serviceIds: ['yandex.cloud.operation.OperationService'], endpoint: 'operation.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.backup.v1.BackupService', + 'yandex.cloud.backup.v1.PolicyService', + 'yandex.cloud.backup.v1.ProviderService', + 
'yandex.cloud.backup.v1.ResourceService', + ], + endpoint: 'backup.api.cloud.yandex.net:443', + }, { serviceIds: [ 'yandex.cloud.compute.v1.DiskPlacementGroupService', @@ -32,6 +41,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.compute.v1.ZoneService', 'yandex.cloud.compute.v1.instancegroup.InstanceGroupService', 'yandex.cloud.compute.v1.SnapshotScheduleService', + 'yandex.cloud.compute.v1.GpuClusterService', ], endpoint: 'compute.api.cloud.yandex.net:443', }, @@ -94,6 +104,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.mdb.postgresql.v1.DatabaseService', 'yandex.cloud.mdb.postgresql.v1.ResourcePresetService', 'yandex.cloud.mdb.postgresql.v1.UserService', + 'yandex.cloud.mdb.postgresql.v1.PerformanceDiagnosticsService', 'yandex.cloud.mdb.redis.v1.BackupService', 'yandex.cloud.mdb.redis.v1.ClusterService', 'yandex.cloud.mdb.redis.v1.ResourcePresetService', @@ -102,6 +113,9 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.mdb.sqlserver.v1.DatabaseService', 'yandex.cloud.mdb.sqlserver.v1.ResourcePresetService', 'yandex.cloud.mdb.sqlserver.v1.UserService', + 'yandex.cloud.mdb.opensearch.v1.BackupService', + 'yandex.cloud.mdb.opensearch.v1.ClusterService', + 'yandex.cloud.mdb.opensearch.v1.ResourcePresetService', ], endpoint: 'mdb.api.cloud.yandex.net:443', }, @@ -132,6 +146,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.containerregistry.v1.RegistryService', 'yandex.cloud.containerregistry.v1.RepositoryService', 'yandex.cloud.containerregistry.v1.ScannerService', + 'yandex.cloud.containerregistry.v1.ScanPolicyService', ], endpoint: 'container-registry.api.cloud.yandex.net:443', }, @@ -177,6 +192,8 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ { serviceIds: [ 'yandex.cloud.logging.v1.LogGroupService', + 'yandex.cloud.logging.v1.ExportService', + 'yandex.cloud.logging.v1.SinkService', ], endpoint: 'logging.api.cloud.yandex.net:443', }, @@ -237,11 +254,19 @@ 
const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ endpoint: 'dataproc-manager.api.cloud.yandex.net:443', }, { - serviceIds: ['yandex.cloud.kms.v1.SymmetricKeyService'], + serviceIds: [ + 'yandex.cloud.kms.v1.SymmetricKeyService', + 'yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionKeyService', + 'yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureKeyService', + ], endpoint: 'kms.api.cloud.yandex.net:443', }, { - serviceIds: ['yandex.cloud.kms.v1.SymmetricCryptoService'], + serviceIds: [ + 'yandex.cloud.kms.v1.SymmetricCryptoService', + 'yandex.cloud.kms.v1.asymmetricencryption.AsymmetricEncryptionCryptoService', + 'yandex.cloud.kms.v1.asymmetricsignature.AsymmetricSignatureCryptoService', + ], endpoint: 'kms.yandex:443', }, { @@ -259,6 +284,12 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ ], endpoint: 'vision.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.ai.ocr.v1.TextRecognitionService', + ], + endpoint: 'ocr.api.cloud.yandex.net:443', + }, { serviceIds: ['yandex.cloud.ai.stt.v2.SttService'], endpoint: 'transcribe.api.cloud.yandex.net:443', @@ -312,6 +343,8 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.datasphere.v1.NodeService', 'yandex.cloud.datasphere.v1.ProjectDataService', 'yandex.cloud.datasphere.v1.ProjectService', + 'yandex.cloud.datasphere.v2.ProjectService', + 'yandex.cloud.datasphere.v2.CommunityService', ], endpoint: 'datasphere.api.cloud.yandex.net:443', }, @@ -343,6 +376,9 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ { serviceIds: [ 'yandex.cloud.marketplace.v1.metering.ImageProductUsageService', + 'yandex.cloud.marketplace.licensemanager.v1.InstanceService', + 'yandex.cloud.marketplace.licensemanager.v1.LockService', + 'yandex.cloud.marketplace.metering.v1.ImageProductUsageService', ], endpoint: 'marketplace.api.cloud.yandex.net:443', }, @@ -353,6 +389,8 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 
'yandex.cloud.organizationmanager.v1.saml.CertificateService', 'yandex.cloud.organizationmanager.v1.saml.FederationService', 'yandex.cloud.organizationmanager.v1.GroupService', + 'yandex.cloud.organizationmanager.v1.GroupMappingService', + 'yandex.cloud.organizationmanager.v1.SshCertificateService', ], endpoint: 'organization-manager.api.cloud.yandex.net:443', }, @@ -362,6 +400,18 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ ], endpoint: 'storage.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.loadtesting.agent.v1.AgentRegistrationService', + 'yandex.cloud.loadtesting.agent.v1.AgentService', + 'yandex.cloud.loadtesting.agent.v1.JobService', + 'yandex.cloud.loadtesting.agent.v1.MonitoringService', + 'yandex.cloud.loadtesting.agent.v1.TestService', + 'yandex.cloud.loadtesting.agent.v1.TrailService', + 'yandex.cloud.loadtesting.api.v1.AgentService', + ], + endpoint: 'loadtesting.api.cloud.yandex.net:443', + }, ]; export const getServiceClientEndpoint = (generatedClientCtor: GeneratedServiceClientCtor): string => { From 2fe752ac006bfe412e7920f2408111a0becfd18e Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 17:04:33 +0300 Subject: [PATCH 09/12] fix: typescript types --- src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts | 6 +++--- .../yandex/cloud/mdb/postgresql/v1/cluster_service.ts | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index 0c42c5d1..020c0959 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -1,5 +1,5 @@ /* eslint-disable */ -import { messageTypeRegistry } from "../../../../../typeRegistry"; +import { MessageType, messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; import { @@ -778,7 +778,7 @@ const baseCluster: object = 
{ hostGroupIds: "", }; -export const Cluster = { +export const Cluster: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.Cluster" as const, encode( @@ -1274,7 +1274,7 @@ const baseClusterConfig: object = { version: "", }; -export const ClusterConfig = { +export const ClusterConfig: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.ClusterConfig" as const, encode( diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index 22ee5df5..da088d77 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -1,5 +1,5 @@ /* eslint-disable */ -import { messageTypeRegistry } from "../../../../../typeRegistry"; +import { MessageType, messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import { makeGenericClientConstructor, @@ -1101,7 +1101,7 @@ const baseListClustersResponse: object = { nextPageToken: "", }; -export const ListClustersResponse = { +export const ListClustersResponse: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.ListClustersResponse" as const, encode( From 0907b2627c5c31159aa5fc37a48dd8e9b61e25c3 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 17:09:28 +0300 Subject: [PATCH 10/12] test: tests pattern --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b9261fc7..3a40f306 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,7 @@ "typescript": "^4.5.4" }, "scripts": { - "test": "jest -c config/jest.ts --passWithNoTests", + "test": "jest -c config/jest.ts --passWithNoTests '.*\\.test\\.ts$'", "lint": "eslint src config", "build": "cross-env NODE_OPTIONS=\"--max-old-space-size=4096\" tsc -p .", "generate-code": "ts-node scripts/generate-code.ts", From 7c487099b6dfad0075763612de6de8c49c284464 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 14 Sep 2023 
17:18:11 +0300 Subject: [PATCH 11/12] test: increase nodejs memory for tests --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3a40f306..6ae100cb 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,7 @@ "typescript": "^4.5.4" }, "scripts": { - "test": "jest -c config/jest.ts --passWithNoTests '.*\\.test\\.ts$'", + "test": "cross-env NODE_OPTIONS=\"--max-old-space-size=4096\" jest -c config/jest.ts --passWithNoTests '.*\\.test\\.ts$'", "lint": "eslint src config", "build": "cross-env NODE_OPTIONS=\"--max-old-space-size=4096\" tsc -p .", "generate-code": "ts-node scripts/generate-code.ts", From f8a89373912aaa7d7d07cd7fb994e20c8a878047 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Thu, 14 Sep 2023 14:27:06 +0000 Subject: [PATCH 12/12] chore(release): 2.5.0 [skip ci] # [2.5.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.4.9...v2.5.0) (2023-09-14) ### Bug Fixes * added missing endpoints ([d3ce81d](https://github.com/yandex-cloud/nodejs-sdk/commit/d3ce81da9c863102915e5319f9e28aa3e7b8823b)) * typescript types ([2fe752a](https://github.com/yandex-cloud/nodejs-sdk/commit/2fe752ac006bfe412e7920f2408111a0becfd18e)) ### Features * update cloudapi ([45b3aba](https://github.com/yandex-cloud/nodejs-sdk/commit/45b3aba15623c30037afafd761946faae51cad00)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 921b4027..8d9a1dd5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.9", + "version": "2.5.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.9", + "version": "2.5.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 6ae100cb..0a01db09 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.9", + "version": "2.5.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud",