From 03f39e2263fb154f1b161b37f3913f01ee3a0532 Mon Sep 17 00:00:00 2001 From: figbot <82115609+withfig-bot@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:16:49 -0700 Subject: [PATCH] feat: update spec --- src/aws.ts | 8 +- src/aws/application-signals.ts | 22 +-- src/aws/appstream.ts | 8 +- src/aws/bedrock-runtime.ts | 2 +- src/aws/cleanrooms.ts | 269 ++++++++++++++++++++++++---- src/aws/codepipeline.ts | 170 ++++++++++++++++++ src/aws/datazone.ts | 39 ++++ src/aws/dynamodb.ts | 6 +- src/aws/ecr.ts | 296 ++++++++++++++++++++++++++++++- src/aws/eks.ts | 16 ++ src/aws/elasticache.ts | 227 ++++++++++++------------ src/aws/elbv2.ts | 83 ++++++++- src/aws/iotsitewise.ts | 25 +-- src/aws/lexv2-models.ts | 4 +- src/aws/medical-imaging.ts | 20 +++ src/aws/memorydb.ts | 8 +- src/aws/network-firewall.ts | 2 +- src/aws/pinpoint-sms-voice-v2.ts | 64 +++---- src/aws/rolesanywhere.ts | 20 +++ src/aws/stepfunctions.ts | 63 ++++++- src/aws/tnb.ts | 31 +++- src/aws/workspaces.ts | 2 +- 22 files changed, 1143 insertions(+), 242 deletions(-) diff --git a/src/aws.ts b/src/aws.ts index f4cab367ab04..c22037f17b92 100644 --- a/src/aws.ts +++ b/src/aws.ts @@ -135,7 +135,7 @@ const completionSpec: Fig.Spec = { { name: "application-signals", description: - "This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability release. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity", + "Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. 
Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, and application names within dashboards and maps", loadSpec: "aws/application-signals", }, { @@ -1418,7 +1418,7 @@ const completionSpec: Fig.Spec = { { name: "memorydb", description: - "MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis\u2019 flexible and friendly data structures, APIs, and commands", + "MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS\u2019 flexible and friendly data structures, APIs, and commands", loadSpec: "aws/memorydb", }, { @@ -1647,7 +1647,7 @@ const completionSpec: Fig.Spec = { { name: "pinpoint-sms-voice-v2", description: - "Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference. This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The Amazon Pinpoint SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to Amazon Pinpoint SMS, it's also helpful to review the Amazon Pinpoint SMS User Guide. The Amazon Pinpoint Developer Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate Amazon Pinpoint functionality into mobile apps and other types of applications. The guide also provides key information, such as Amazon Pinpoint integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Pinpoint SMS and Voice, version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. 
To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", + "Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference. This guide provides information about AWS End User Messaging SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the AWS End User Messaging SMS User Guide. The AWS End User Messaging SMS User Guide provides tutorials, code samples, and procedures that demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The AWS End User Messaging SMS and Voice version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", loadSpec: "aws/pinpoint-sms-voice-v2", }, { @@ -2084,7 +2084,7 @@ const completionSpec: Fig.Spec = { { name: "stepfunctions", description: - "Step Functions Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows. You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. 
You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn", + "Step Functions Step Functions coordinates the components of distributed applications and microservices using visual workflows. You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn", loadSpec: "aws/stepfunctions", }, { diff --git a/src/aws/application-signals.ts b/src/aws/application-signals.ts index a94168e69503..16cfd7465e23 100644 --- a/src/aws/application-signals.ts +++ b/src/aws/application-signals.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "application-signals", description: - "This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability release. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity", + "Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. 
The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, and application names within dashboards and maps", subcommands: [ { name: "batch-get-service-level-objective-budget-report", @@ -144,7 +144,7 @@ const completionSpec: Fig.Spec = { { name: "--start-time", description: - "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -152,7 +152,7 @@ const completionSpec: Fig.Spec = { { name: "--end-time", description: - "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -223,7 +223,7 @@ const completionSpec: Fig.Spec = { { name: "--start-time", description: - "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -231,7 +231,7 @@ const completionSpec: Fig.Spec = { { name: "--end-time", description: - "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested end time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -311,7 +311,7 @@ const completionSpec: Fig.Spec = { { name: "--start-time", description: - "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. 
For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -319,7 +319,7 @@ const completionSpec: Fig.Spec = { { name: "--end-time", description: - "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -478,7 +478,7 @@ const completionSpec: Fig.Spec = { { name: "--start-time", description: - "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -486,7 +486,7 @@ const completionSpec: Fig.Spec = { { name: "--end-time", description: - "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested end time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -566,7 +566,7 @@ const completionSpec: Fig.Spec = { { name: "--start-time", description: - "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, @@ -574,7 +574,7 @@ const completionSpec: Fig.Spec = { { name: "--end-time", description: - "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057", + "The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your requested start time will be rounded to the nearest hour", args: { name: "timestamp", }, diff --git a/src/aws/appstream.ts b/src/aws/appstream.ts index 22881f57f11a..c753c481440d 100644 --- a/src/aws/appstream.ts +++ b/src/aws/appstream.ts @@ -792,7 +792,7 @@ const completionSpec: Fig.Spec = { { name: "--disconnect-timeout-in-seconds", description: - "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000", + "The amount of time that a streaming session remains active after users disconnect. 
If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000", args: { name: "integer", }, @@ -840,7 +840,7 @@ const completionSpec: Fig.Spec = { { name: "--idle-disconnect-timeout-in-seconds", description: - "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity", + "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity", args: { name: "integer", }, @@ -3888,7 +3888,7 @@ const completionSpec: Fig.Spec = { { name: "--disconnect-timeout-in-seconds", description: - "The amount of time that a streaming session remains active after users disconnect. 
If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 360000", + "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance. Specify a value between 60 and 36000", args: { name: "integer", }, @@ -3936,7 +3936,7 @@ const completionSpec: Fig.Spec = { { name: "--idle-disconnect-timeout-in-seconds", description: - "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity", + "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. 
For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity", args: { name: "integer", }, diff --git a/src/aws/bedrock-runtime.ts b/src/aws/bedrock-runtime.ts index bb9a7ffa75d1..695aebf0a95c 100644 --- a/src/aws/bedrock-runtime.ts +++ b/src/aws/bedrock-runtime.ts @@ -61,7 +61,7 @@ const completionSpec: Fig.Spec = { { name: "converse", description: - "Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action", + "Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action", options: [ { name: "--model-id", diff --git a/src/aws/cleanrooms.ts b/src/aws/cleanrooms.ts index f6eded8b9696..dc603351c15b 100644 --- a/src/aws/cleanrooms.ts +++ b/src/aws/cleanrooms.ts @@ -450,7 +450,7 @@ const completionSpec: Fig.Spec = { { name: "--analysis-rule-policy", description: - "The entire created configured table analysis rule object", + "The analysis rule policy that was created for the configured table", args: { name: "structure", }, @@ -545,6 +545,61 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-configured-table-association-analysis-rule", + description: + "Creates a new analysis rule for an associated configured table", + options: [ + { + name: "--membership-identifier", + description: + "A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID", + args: { + name: "string", + }, + }, + { + name: "--configured-table-association-identifier", + description: + "The unique ID for the configured table association. 
Currently accepts the configured table association ID", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-type", + description: "The type of analysis rule", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-policy", + description: + "The analysis rule policy that was created for the configured table association", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-id-mapping-table", description: "Creates an ID mapping table", @@ -1024,6 +1079,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-configured-table-association-analysis-rule", + description: + "Deletes an analysis rule for a configured table association", + options: [ + { + name: "--membership-identifier", + description: + "A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID", + args: { + name: "string", + }, + }, + { + name: "--configured-table-association-identifier", + description: + "The identi\ufb01er for the con\ufb01gured table association that's related to the analysis rule that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-type", + description: "The type of the analysis rule that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-id-mapping-table", description: "Deletes an ID mapping table", @@ -1582,6 +1684,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-configured-table-association-analysis-rule", + description: + "Retrieves the analysis rule for a configured table association", + options: [ + { + name: "--membership-identifier", + description: + "A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID", + args: { + name: "string", + }, + }, + { + name: "--configured-table-association-identifier", + description: + "The identi\ufb01er for the con\ufb01gured table association that's related to the analysis rule", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-type", + description: "The type of analysis rule that you want to retrieve", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-id-mapping-table", description: "Retrieves an ID mapping table", @@ -1865,7 +2014,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -1873,7 +2022,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -1936,7 +2085,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -1944,7 +2093,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2008,7 +2157,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2016,7 +2165,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2151,7 +2300,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2159,7 +2308,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -2229,7 +2378,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -2237,7 +2386,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2293,7 +2442,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2301,7 +2450,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. 
Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -2372,7 +2521,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2380,7 +2529,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -2443,7 +2592,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2451,7 +2600,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2506,7 +2655,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2514,7 +2663,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2719,7 +2868,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2727,7 +2876,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2783,7 +2932,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2791,7 +2940,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -2863,7 +3012,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2871,7 +3020,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -2942,7 +3091,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -2950,7 +3099,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -3019,7 +3168,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -3027,7 +3176,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service can return a nextToken even if the maximum results has not been met", args: { name: "integer", }, @@ -3089,7 +3238,8 @@ const completionSpec: Fig.Spec = { }, { name: "--schema-type", - description: "If present, filter schemas by schema type", + description: + "If present, filter schemas by schema type. The only valid schema type is currently `TABLE`", args: { name: "string", }, @@ -3097,7 +3247,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The pagination token that's used to fetch the next set of results", + "The token value retrieved from a previous call to access the next page of results", args: { name: "string", }, @@ -3105,7 +3255,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", + "The maximum size of the results that is returned per call", args: { name: "integer", }, @@ -3684,6 +3834,61 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-configured-table-association-analysis-rule", + description: + "Updates the analysis rule for a configured table association", + options: [ + { + name: "--membership-identifier", + description: + "A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID", + args: { + name: "string", + }, + }, + { + name: "--configured-table-association-identifier", + description: + "The identifier for the configured table association to update", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-type", + description: "The analysis rule type that you want to update", + args: { + name: "string", + }, + }, + { + name: "--analysis-rule-policy", + description: + "The updated analysis rule policy for the con\ufb01gured table association", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-id-mapping-table", description: diff --git a/src/aws/codepipeline.ts b/src/aws/codepipeline.ts index de4d32b1db1c..bed36f4099f8 100644 --- a/src/aws/codepipeline.ts +++ b/src/aws/codepipeline.ts @@ -982,6 +982,123 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-rule-executions", + description: + "Lists the rule executions that have occurred in a pipeline configured for conditions with rules", + options: [ + { + name: "--pipeline-name", + description: + "The name of the pipeline for which you want to get execution summary information", + args: { + name: "string", + }, + }, + { + name: "--filter", + description: + "Input information used to filter rule execution history", + args: { + name: "structure", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Pipeline history is limited to the most recent 12 months, based on pipeline execution start times. Default value is 100", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "The token that was returned from the previous ListRuleExecutions call, which can be used to return the next set of rule executions in the list", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-rule-types", + description: "Lists the rules for the condition", + options: [ + { + name: "--rule-owner-filter", + description: "The rule owner to filter on", + args: { + name: "string", + }, + }, + { + name: "--region-filter", + description: "The rule Region to filter on", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tags-for-resource", description: @@ -1118,6 +1235,59 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "override-stage-condition", + description: "Used to override a stage condition", + options: [ + { + name: "--pipeline-name", + description: + "The name of the pipeline with the stage that will override the condition", + args: { + name: "string", + }, + }, + { + name: "--stage-name", + description: "The name of the stage for the override", + args: { + name: "string", + }, + }, + { + name: "--pipeline-execution-id", + description: "The ID of the pipeline execution for the override", + args: { + name: "string", + }, + }, + { + name: "--condition-type", + description: + "The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "poll-for-jobs", description: diff --git a/src/aws/datazone.ts b/src/aws/datazone.ts index 1fa0eac3f982..b60dd9d16531 100644 --- a/src/aws/datazone.ts +++ b/src/aws/datazone.ts @@ -3001,6 +3001,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-environment-credentials", + description: "Gets the credentials of an environment in Amazon DataZone", + options: [ + { + name: "--domain-identifier", + description: + "The ID of the Amazon DataZone domain in which this environment and its credentials exist", + args: { + name: "string", + }, + }, + { + name: "--environment-identifier", + description: + "The ID of the environment whose credentials this operation gets", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-environment-profile", description: "Gets an environment profile in Amazon DataZone", diff --git a/src/aws/dynamodb.ts b/src/aws/dynamodb.ts index 3f8e523e8ba4..26a6f871853d 100644 --- a/src/aws/dynamodb.ts +++ b/src/aws/dynamodb.ts @@ -6,7 +6,7 @@ const completionSpec: Fig.Spec = { { name: "batch-execute-statement", description: - "This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement", + "This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB. The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. An HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement", options: [ { name: "--statements", @@ -86,7 +86,7 @@ const completionSpec: Fig.Spec = { { name: "batch-write-item", description: - "The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.
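The batch-execute-statement description above maps to BatchExecuteStatementCommand in the AWS SDK v3. A minimal sketch of the batch-read pattern it describes, with a hypothetical Music table whose full key (Artist, SongTitle) is pinned by every SELECT:

```ts
// Batch PartiQL reads: every SELECT pins the full primary key, so each
// statement returns at most one item. Table and keys are hypothetical.
import {
  DynamoDBClient,
  BatchExecuteStatementCommand,
} from "@aws-sdk/client-dynamodb";

const ddb = new DynamoDBClient({});

const out = await ddb.send(
  new BatchExecuteStatementCommand({
    Statements: [
      {
        Statement: 'SELECT * FROM "Music" WHERE Artist = ? AND SongTitle = ?',
        Parameters: [{ S: "Acme Band" }, { S: "Happy Day" }],
      },
      {
        Statement: 'SELECT * FROM "Music" WHERE Artist = ? AND SongTitle = ?',
        Parameters: [{ S: "Acme Band" }, { S: "PartiQL Rocks" }],
      },
    ],
  })
);

// An HTTP 200 does not mean every statement succeeded; check each response.
for (const r of out.Responses ?? []) {
  if (r.Error) console.error(r.Error.Code, r.Error.Message);
}
```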
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes", + "The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. 
If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. 
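The retry guidance above (resubmit UnprocessedItems with exponential backoff) looks roughly like this in TypeScript with the SDK v3; the table name and items are hypothetical:

```ts
// Resubmit UnprocessedItems with exponential backoff, as recommended above.
// Table name and items are hypothetical.
import {
  DynamoDBClient,
  BatchWriteItemCommand,
  type BatchWriteItemCommandInput,
} from "@aws-sdk/client-dynamodb";

const ddb = new DynamoDBClient({});

async function batchWriteWithBackoff(
  requestItems: BatchWriteItemCommandInput["RequestItems"]
): Promise<void> {
  let items = requestItems;
  for (let attempt = 0; items && Object.keys(items).length > 0; attempt++) {
    const out = await ddb.send(
      new BatchWriteItemCommand({ RequestItems: items })
    );
    items = out.UnprocessedItems;
    if (items && Object.keys(items).length > 0) {
      // Back off before retrying only the leftovers.
      await new Promise((r) => setTimeout(r, 2 ** attempt * 100));
    }
  }
}

await batchWriteWithBackoff({
  Music: [
    {
      PutRequest: {
        Item: { Artist: { S: "Acme Band" }, SongTitle: { S: "Happy Day" } },
      },
    },
  ],
});
```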
For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes", options: [ { name: "--request-items", @@ -532,7 +532,7 @@ const completionSpec: Fig.Spec = { { name: "delete-table", description: - "The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table", + "The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table", options: [ { name: "--table-name", diff --git a/src/aws/ecr.ts b/src/aws/ecr.ts index d216b683f123..48396941111f 100644 --- a/src/aws/ecr.ts +++ b/src/aws/ecr.ts @@ -255,7 +255,7 @@ const completionSpec: Fig.Spec = { { name: "--upstream-registry-url", description: - "The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.
Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io", args: { name: "string", }, @@ -374,6 +374,101 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-repository-creation-template", + description: + "Creates a repository creation template. This template is used to define the settings for repositories created by Amazon ECR on your behalf. For example, repositories created through pull through cache actions. For more information, see Private repository creation templates in the Amazon Elastic Container Registry User Guide", + options: [ + { + name: "--prefix", + description: + "The repository namespace prefix to associate with the template. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with prod/team/. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix. There is always an assumed / applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR treats that as ecr-public/. When using a pull through cache rule, the repository prefix you specify during rule creation is what you should specify as your repository creation template prefix as well", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A description for the repository creation template", + args: { + name: "string", + }, + }, + { + name: "--encryption-configuration", + description: + "The encryption configuration to use for repositories created using the template", + args: { + name: "structure", + }, + }, + { + name: "--resource-tags", + description: + "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters", + args: { + name: "list", + }, + }, + { + name: "--image-tag-mutability", + description: + "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten", + args: { + name: "string", + }, + }, + { + name: "--repository-policy", + description: + "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions", + args: { + name: "string", + }, + }, + { + name: "--lifecycle-policy", + description: + "The lifecycle policy to use for repositories created using the template", + args: { + name: "string", + }, + }, + { + name: "--applied-for", + description: + "A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. 
The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION", + args: { + name: "list", + }, + }, + { + name: "--custom-role-arn", + description: + "The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-lifecycle-policy", description: @@ -524,6 +619,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-repository-creation-template", + description: "Deletes a repository creation template", + options: [ + { + name: "--prefix", + description: + "The repository namespace prefix associated with the repository creation template", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-repository-policy", description: @@ -973,6 +1099,78 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-repository-creation-templates", + description: + "Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template", + options: [ + { + name: "--prefixes", + description: + "The repository namespace prefixes associated with the repository creation templates to describe. If this value is not specified, all repository creation templates are returned", + args: { + name: "list", + }, + }, + { + name: "--next-token", + description: + "The nextToken value returned from a previous paginated DescribeRepositoryCreationTemplates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. 
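A hedged sketch of the create-repository-creation-template call documented above, assuming an @aws-sdk/client-ecr release recent enough to include CreateRepositoryCreationTemplateCommand; the prefix and settings are placeholders:

```ts
// Hedged sketch: assumes CreateRepositoryCreationTemplateCommand exists in
// your @aws-sdk/client-ecr version. Prefix and settings are placeholders.
import {
  ECRClient,
  CreateRepositoryCreationTemplateCommand,
} from "@aws-sdk/client-ecr";

const ecr = new ECRClient({});

await ecr.send(
  new CreateRepositoryCreationTemplateCommand({
    prefix: "prod/team", // applies to repositories created under prod/team/
    description: "Defaults for pull through cache repositories",
    imageTagMutability: "IMMUTABLE",
    appliedFor: ["PULL_THROUGH_CACHE"], // or REPLICATION
  })
);
```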
This value is null when there are no more results to return. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of repository results returned by DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and a nextToken value, if applicable", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-authorization-token", description: @@ -1717,7 +1915,7 @@ const completionSpec: Fig.Spec = { { name: "put-replication-configuration", description: - "Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. 
The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy", + "Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. For more information on the custom role for replication, see Creating an IAM role for replication. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy", options: [ { name: "--replication-configuration", @@ -2023,6 +2221,100 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-repository-creation-template", + description: "Updates an existing repository creation template", + options: [ + { + name: "--prefix", + description: + "The repository namespace prefix that matches an existing repository creation template in the registry. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. This includes a repository named prod/team1 as well as a repository named prod/repository1. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A description for the repository creation template", + args: { + name: "string", + }, + }, + { + name: "--encryption-configuration", + description: + "The encryption configuration to associate with the repository creation template", + args: { + name: "structure", + }, + }, + { + name: "--resource-tags", + description: + "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters", + args: { + name: "list", + }, + }, + { + name: "--image-tag-mutability", + description: + "Updates the tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten", + args: { + name: "string", + }, + }, + { + name: "--repository-policy", + description: + "Updates the repository policy created using the template. 
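For the put-replication-configuration behavior described above, a minimal SDK v3 sketch with one cross-Region rule; the account ID and Region are placeholders:

```ts
// One replication rule copying the registry's images to another Region.
// Account ID and Region are placeholders.
import {
  ECRClient,
  PutReplicationConfigurationCommand,
} from "@aws-sdk/client-ecr";

const ecr = new ECRClient({});

await ecr.send(
  new PutReplicationConfigurationCommand({
    replicationConfiguration: {
      rules: [
        { destinations: [{ region: "us-west-2", registryId: "111122223333" }] },
      ],
    },
  })
);
```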
A repository policy is a permissions policy associated with a repository to control access permissions", + args: { + name: "string", + }, + }, + { + name: "--lifecycle-policy", + description: + "Updates the lifecycle policy associated with the specified repository creation template", + args: { + name: "string", + }, + }, + { + name: "--applied-for", + description: + "Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION", + args: { + name: "list", + }, + }, + { + name: "--custom-role-arn", + description: + "The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "upload-layer-part", description: diff --git a/src/aws/eks.ts b/src/aws/eks.ts index 6bcd80ef9d1f..2c4470d3a651 100644 --- a/src/aws/eks.ts +++ b/src/aws/eks.ts @@ -753,6 +753,14 @@ const completionSpec: Fig.Spec = { description: "If you set this value to False when creating a cluster, the default networking add-ons will not be installed. The default networking addons include vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons", }, + { + name: "--upgrade-policy", + description: + "New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD", + args: { + name: "structure", + }, + }, { name: "--kubernetes-version", description: @@ -3419,6 +3427,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--upgrade-policy", + description: + "You can enable or disable extended support for clusters currently on standard support. You cannot disable extended support once it starts. You must enable extended support before your cluster exits standard support", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/elasticache.ts b/src/aws/elasticache.ts index 8932dc700e1b..c59df62fc094 100644 --- a/src/aws/elasticache.ts +++ b/src/aws/elasticache.ts @@ -224,12 +224,12 @@ const completionSpec: Fig.Spec = { { name: "copy-serverless-cache-snapshot", description: - "Creates a copy of an existing serverless cache\u2019s snapshot. Available for Redis only", + "Creates a copy of an existing serverless cache\u2019s snapshot. 
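The new --upgrade-policy structure added for EKS above can be exercised like this; a hedged sketch assuming the SDK models upgradePolicy with a supportType of STANDARD or EXTENDED, per the descriptions in the patch:

```ts
// Hedged sketch: assumes the SDK models the new upgradePolicy structure with
// a supportType of "STANDARD" | "EXTENDED", per the descriptions above.
import { EKSClient, UpdateClusterConfigCommand } from "@aws-sdk/client-eks";

const eks = new EKSClient({});

await eks.send(
  new UpdateClusterConfigCommand({
    name: "my-cluster", // hypothetical cluster name
    upgradePolicy: { supportType: "EXTENDED" }, // set before standard support ends
  })
);
```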
Available for Redis OSS and Serverless Memcached only", options: [ { name: "--source-serverless-cache-snapshot-name", description: - "The identifier of the existing serverless cache\u2019s snapshot to be copied. Available for Redis only", + "The identifier of the existing serverless cache\u2019s snapshot to be copied. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -237,7 +237,7 @@ const completionSpec: Fig.Spec = { { name: "--target-serverless-cache-snapshot-name", description: - "The identifier for the snapshot to be created. Available for Redis only", + "The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -245,7 +245,7 @@ const completionSpec: Fig.Spec = { { name: "--kms-key-id", description: - "The identifier of the KMS key used to encrypt the target snapshot. Available for Redis only", + "The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -253,7 +253,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis only. Default: NULL", + "A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL", args: { name: "list", }, @@ -280,7 +280,7 @@ const completionSpec: Fig.Spec = { { name: "copy-snapshot", description: - "Makes a copy of an existing snapshot. This operation is valid for Redis only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. 
Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide", + "Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide", options: [ { name: "--source-snapshot-name", @@ -344,7 +344,7 @@ const completionSpec: Fig.Spec = { { name: "create-cache-cluster", description: - "Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis. This operation is not supported for Redis (cluster mode enabled) clusters", + "Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS. 
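The copy-snapshot operation described above corresponds to CopySnapshotCommand in @aws-sdk/client-elasticache. A minimal sketch follows; snapshot names are placeholders, and TargetBucket only matters when exporting to S3:

```ts
// Copy a snapshot within ElastiCache; TargetBucket is only needed when
// exporting to S3. Names are placeholders.
import {
  ElastiCacheClient,
  CopySnapshotCommand,
} from "@aws-sdk/client-elasticache";

const ec = new ElastiCacheClient({});

await ec.send(
  new CopySnapshotCommand({
    SourceSnapshotName: "my-snapshot",
    TargetSnapshotName: "my-snapshot-copy",
    // TargetBucket: "my-export-bucket", // uncomment to export to S3
  })
);
```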
This operation is not supported for Redis OSS (cluster mode enabled) clusters", options: [ { name: "--cache-cluster-id", @@ -389,7 +389,7 @@ const completionSpec: Fig.Spec = { { name: "--num-cache-nodes", description: - "The initial number of cache nodes that the cluster has. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/", + "The initial number of cache nodes that the cluster has. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/", args: { name: "integer", }, @@ -397,7 +397,7 @@ const completionSpec: Fig.Spec = { { name: "--cache-node-type", description: - "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later", + "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later", args: { name: "string", }, @@ -460,7 +460,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-arns", description: - "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb", + "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb", args: { name: "list", }, @@ -468,7 +468,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-name", description: - "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis", + "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis", args: { name: "string", }, @@ -500,12 +500,12 @@ const completionSpec: Fig.Spec = { { name: "--auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. 
This parameter is disabled for previous versions", }, { name: "--no-auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--snapshot-retention-limit", @@ -573,7 +573,7 @@ const completionSpec: Fig.Spec = { { name: "--network-type", description: - "Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -581,7 +581,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-discovery", description: - "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -764,7 +764,7 @@ const completionSpec: Fig.Spec = { { name: "create-global-replication-group", description: - "Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster", + "Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster", options: [ { name: "--global-replication-group-id-suffix", @@ -811,7 +811,7 @@ const completionSpec: Fig.Spec = { { name: "create-replication-group", description: - "Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. 
Writes to the primary are asynchronously propagated to the replicas. A Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide. This operation is valid for Redis only", + "Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number of replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide.
This operation is valid for Redis OSS only", options: [ { name: "--replication-group-id", @@ -846,12 +846,12 @@ const completionSpec: Fig.Spec = { { name: "--automatic-failover-enabled", description: - "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups. Default: false", + "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false", }, { name: "--no-automatic-failover-enabled", description: - "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups. Default: false", + "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false", }, { name: "--multi-az-enabled", @@ -882,7 +882,7 @@ const completionSpec: Fig.Spec = { { name: "--num-node-groups", description: - "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1. Default: 1", + "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1", args: { name: "integer", }, @@ -898,7 +898,7 @@ const completionSpec: Fig.Spec = { { name: "--node-group-configuration", description: - "A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group", + "A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group", args: { name: "list", }, @@ -906,7 +906,7 @@ const completionSpec: Fig.Spec = { { name: "--cache-node-type", description: - "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. 
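Putting the replication-group parameters above together, a minimal sketch of creating a small cluster-mode-enabled group with the SDK v3; IDs and node type are placeholders:

```ts
// Small cluster-mode-enabled group: 2 shards, 1 replica per shard.
// IDs and node type are placeholders.
import {
  ElastiCacheClient,
  CreateReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

const ec = new ElastiCacheClient({});

await ec.send(
  new CreateReplicationGroupCommand({
    ReplicationGroupId: "my-redis-group",
    ReplicationGroupDescription: "2 shards x 1 replica, cluster mode enabled",
    Engine: "redis",
    CacheNodeType: "cache.r7g.large",
    NumNodeGroups: 2,
    ReplicasPerNodeGroup: 1,
    AutomaticFailoverEnabled: true, // required when cluster mode is enabled
  })
);
```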
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later", + "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later", args: { name: "string", }, @@ -930,7 +930,7 @@ const completionSpec: Fig.Spec = { { name: "--cache-parameter-group-name", description: - "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. 
To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on", + "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on", args: { name: "string", }, @@ -970,7 +970,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-arns", description: - "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb", + "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb", args: { name: "list", }, @@ -1010,12 +1010,12 @@ const completionSpec: Fig.Spec = { { name: "--auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--no-auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--snapshot-retention-limit", @@ -1044,22 +1044,22 @@ const completionSpec: Fig.Spec = { { name: "--transit-encryption-enabled", description: - "A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. 
Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup", + "A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup", }, { name: "--no-transit-encryption-enabled", description: - "A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup", + "A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup", }, { name: "--at-rest-encryption-enabled", description: - "A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false", + "A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false", }, { name: "--no-at-rest-encryption-enabled", description: - "A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false", + "A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. 
To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false", }, { name: "--kms-key-id", @@ -1096,7 +1096,7 @@ const completionSpec: Fig.Spec = { { name: "--network-type", description: - "Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -1104,7 +1104,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-discovery", description: - "The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -1112,7 +1112,7 @@ const completionSpec: Fig.Spec = { { name: "--transit-encryption-mode", description: - "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group", + "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group", args: { name: "string", }, @@ -1120,7 +1120,7 @@ const completionSpec: Fig.Spec = { { name: "--cluster-mode", description: - "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled", + "Enabled or Disabled. 
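The --transit-encryption-mode hunk above describes a two-step, no-downtime TLS migration; a sketch of those two steps with the AWS SDK for JavaScript v3 follows, assuming @aws-sdk/client-elasticache and a hypothetical replication group identifier. The --cluster-mode description continues after the sketch.

// Sketch: migrate an existing replication group to in-transit encryption in
// two steps, per the description above. Run step 2 only after every client
// connects over TLS.
import {
  ElastiCacheClient,
  ModifyReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

const client = new ElastiCacheClient({});

// Step 1: turn on TLS but keep accepting plaintext connections ("preferred").
await client.send(
  new ModifyReplicationGroupCommand({
    ReplicationGroupId: "my-replication-group", // hypothetical
    TransitEncryptionEnabled: true,
    TransitEncryptionMode: "preferred",
    ApplyImmediately: true,
  })
);

// Step 2: once all clients use encrypted connections, require them.
await client.send(
  new ModifyReplicationGroupCommand({
    ReplicationGroupId: "my-replication-group",
    TransitEncryptionMode: "required",
    ApplyImmediately: true,
  })
);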
To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled", args: { name: "string", }, @@ -1128,7 +1128,7 @@ const completionSpec: Fig.Spec = { { name: "--serverless-cache-snapshot-name", description: - "The name of the snapshot used to create a replication group. Available for Redis only", + "The name of the snapshot used to create a replication group. Available for Redis OSS only", args: { name: "string", }, @@ -1223,7 +1223,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-arns-to-restore", description: - "The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only", + "The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only", args: { name: "list", }, @@ -1239,7 +1239,7 @@ const completionSpec: Fig.Spec = { { name: "--user-group-id", description: - "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL", + "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL", args: { name: "string", }, @@ -1255,7 +1255,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-retention-limit", description: - "The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only", + "The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only", args: { name: "integer", }, @@ -1263,7 +1263,7 @@ const completionSpec: Fig.Spec = { { name: "--daily-snapshot-time", description: - "The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis only", + "The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -1290,12 +1290,12 @@ const completionSpec: Fig.Spec = { { name: "create-serverless-cache-snapshot", description: - "This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis only", + "This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only", options: [ { name: "--serverless-cache-snapshot-name", description: - "The name for the snapshot being created. Must be unique for the customer account. Available for Redis only. Must be between 1 and 255 characters", + "The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only. 
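The create-serverless-cache-snapshot command described here takes only a handful of parameters; a minimal sketch via the AWS SDK for JavaScript v3, assuming @aws-sdk/client-elasticache, with hypothetical cache and snapshot names:

// Sketch: snapshot an existing serverless cache. The snapshot name must be
// unique for the account and 1-255 characters, per the description above.
import {
  ElastiCacheClient,
  CreateServerlessCacheSnapshotCommand,
} from "@aws-sdk/client-elasticache";

const client = new ElastiCacheClient({});

await client.send(
  new CreateServerlessCacheSnapshotCommand({
    ServerlessCacheSnapshotName: "nightly-snapshot", // hypothetical
    ServerlessCacheName: "my-serverless-cache", // hypothetical existing cache
    Tags: [{ Key: "team", Value: "platform" }], // optional key-value tags
  })
);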
Must be between 1 and 255 characters", args: { name: "string", }, @@ -1303,7 +1303,7 @@ const completionSpec: Fig.Spec = { { name: "--serverless-cache-name", description: - "The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis only", + "The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -1311,7 +1311,7 @@ const completionSpec: Fig.Spec = { { name: "--kms-key-id", description: - "The ID of the KMS key used to encrypt the snapshot. Available for Redis only. Default: NULL", + "The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL", args: { name: "string", }, @@ -1319,7 +1319,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis only", + "A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only", args: { name: "list", }, @@ -1346,7 +1346,7 @@ const completionSpec: Fig.Spec = { { name: "create-snapshot", description: - "Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis only", + "Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis OSS only", options: [ { name: "--replication-group-id", @@ -1408,7 +1408,7 @@ const completionSpec: Fig.Spec = { { name: "create-user", description: - "For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC)", + "For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see Using Role Based Access Control (RBAC)", options: [ { name: "--user-id", @@ -1491,7 +1491,7 @@ const completionSpec: Fig.Spec = { { name: "create-user-group", description: - "For Redis engine version 6.0 onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)", + "For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more information, see Using Role Based Access Control (RBAC)", options: [ { name: "--user-group-id", @@ -1502,7 +1502,7 @@ const completionSpec: Fig.Spec = { }, { name: "--engine", - description: "The current supported value is Redis", + description: "The current supported value is Redis user", args: { name: "string", }, @@ -1517,7 +1517,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis only", + "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis OSS only", args: { name: "list", }, @@ -1563,7 +1563,7 @@ const completionSpec: Fig.Spec = { { name: "--global-node-groups-to-remove", description: - "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. 
ElastiCache for Redis will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster", + "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster", args: { name: "list", }, @@ -1571,7 +1571,7 @@ const completionSpec: Fig.Spec = { { name: "--global-node-groups-to-retain", description: - "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache for Redis will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster", + "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster", args: { name: "list", }, @@ -1608,7 +1608,7 @@ const completionSpec: Fig.Spec = { { name: "decrease-replica-count", description: - "Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time", + "Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time", options: [ { name: "--replication-group-id", @@ -1621,7 +1621,7 @@ const completionSpec: Fig.Spec = { { name: "--new-replica-count", description: - "The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Redis (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)", + "The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. 
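For illustration, the ConfigureShard members named above (NewReplicaCount, NodeGroupId, PreferredAvailabilityZones) are used verbatim by the SDK. A sketch of decrease-replica-count with the AWS SDK for JavaScript v3, assuming @aws-sdk/client-elasticache; identifiers and zones are hypothetical:

// Sketch: decrease replicas per shard in a cluster mode enabled group.
// PreferredAvailabilityZones lists one AZ per node (primary + replicas).
import {
  ElastiCacheClient,
  DecreaseReplicaCountCommand,
} from "@aws-sdk/client-elasticache";

const client = new ElastiCacheClient({});

await client.send(
  new DecreaseReplicaCountCommand({
    ReplicationGroupId: "my-cluster", // hypothetical
    ReplicaConfiguration: [
      {
        NodeGroupId: "0001",
        NewReplicaCount: 1, // one primary + one replica in this shard
        PreferredAvailabilityZones: ["us-east-1a", "us-east-1b"],
      },
      { NodeGroupId: "0002", NewReplicaCount: 2 },
    ],
    ApplyImmediately: true, // required by this operation
  })
);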
The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)", args: { name: "integer", }, @@ -1629,7 +1629,7 @@ const completionSpec: Fig.Spec = { { name: "--replica-configuration", description: - "A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones", + "A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones", args: { name: "list", }, @@ -1674,7 +1674,7 @@ const completionSpec: Fig.Spec = { { name: "delete-cache-cluster", description: - "Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis (cluster mode enabled) clusters Redis (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis (cluster mode enabled) replication group A cluster that is not in the available state", + "Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state", options: [ { name: "--cache-cluster-id", @@ -1851,7 +1851,7 @@ const completionSpec: Fig.Spec = { { name: "delete-replication-group", description: - "Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation. This operation is valid for Redis only", + "Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. 
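Continuing the delete-replication-group description: a sketch of retaining the primary and taking a final snapshot, via the AWS SDK for JavaScript v3 (assumes @aws-sdk/client-elasticache; FinalSnapshotIdentifier is the SDK parameter for the final snapshot name, and the identifiers below are hypothetical):

// Sketch: delete only the read replicas, keep the primary, and request a
// final snapshot first (needs the CreateSnapshot permission noted just below).
import {
  ElastiCacheClient,
  DeleteReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

const client = new ElastiCacheClient({});

await client.send(
  new DeleteReplicationGroupCommand({
    ReplicationGroupId: "my-replication-group", // hypothetical
    RetainPrimaryCluster: true, // keep the primary, drop the replicas
    FinalSnapshotIdentifier: "my-rg-final-snapshot", // hypothetical name
  })
);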
When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation. CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. This operation is valid for Redis OSS only",
      options: [
        {
          name: "--replication-group-id",
@@ -1900,7 +1900,8 @@ const completionSpec: Fig.Spec = {
     },
     {
       name: "delete-serverless-cache",
-      description: "Deletes a specified existing serverless cache",
+      description:
+        "Deletes a specified existing serverless cache. CreateServerlessCacheSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception",
       options: [
         {
           name: "--serverless-cache-name",
           args: {
             name: "string",
           },
         },
         {
           name: "--final-snapshot-name",
           description:
-            "Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken",
+            "Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken",
           args: {
             name: "string",
           },
@@ -1939,12 +1940,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "delete-serverless-cache-snapshot",
       description:
-        "Deletes an existing serverless cache snapshot. Available for Redis only",
+        "Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only",
      options: [
        {
          name: "--serverless-cache-snapshot-name",
          description:
-            "Idenfitier of the snapshot to be deleted. Available for Redis only",
+            "Identifier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only",
           args: {
             name: "string",
           },
@@ -1971,7 +1972,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "delete-snapshot",
       description:
-        "Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis only",
+        "Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis OSS only",
       options: [
         {
           name: "--snapshot-name",
@@ -2002,7 +2003,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "delete-user",
       description:
-        "For Redis engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC)",
+        "For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC)",
       options: [
         {
           name: "--user-id",
@@ -2033,7 +2034,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "delete-user-group",
       description:
-        "For Redis engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC)",
+        "For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. 
For more information, see Using Role Based Access Control (RBAC)",
       options: [
         {
           name: "--user-group-id",
@@ -2103,12 +2104,12 @@ const completionSpec: Fig.Spec = {
         {
           name: "--show-cache-clusters-not-in-replication-groups",
           description:
-            "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters",
+            "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
         },
         {
           name: "--no-show-cache-clusters-not-in-replication-groups",
           description:
-            "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters",
+            "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
         },
         {
           name: "--cli-input-json",
@@ -2805,7 +2806,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "describe-replication-groups",
       description:
-        "Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis only",
+        "Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis OSS only",
       options: [
         {
           name: "--replication-group-id",
@@ -2898,7 +2899,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--cache-node-type",
           description:
-            "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later", + "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. 
Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later", args: { name: "string", }, @@ -3001,7 +3002,7 @@ const completionSpec: Fig.Spec = { { name: "--cache-node-type", description: - "The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later", + "The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported Node Types R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later", args: { name: "string", }, @@ -3092,12 +3093,12 @@ const completionSpec: Fig.Spec = { { name: "describe-serverless-cache-snapshots", description: - "Returns information about serverless cache snapshots. By default, this API lists all of the customer\u2019s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis only", + "Returns information about serverless cache snapshots. By default, this API lists all of the customer\u2019s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only", options: [ { name: "--serverless-cache-name", description: - "The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis only", + "The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -3105,7 +3106,7 @@ const completionSpec: Fig.Spec = { { name: "--serverless-cache-snapshot-name", description: - "The identifier of the serverless cache\u2019s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis only", + "The identifier of the serverless cache\u2019s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only", args: { name: "string", }, @@ -3113,7 +3114,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshot-type", description: - "The type of snapshot that is being described. Available for Redis only", + "The type of snapshot that is being described. 
Available for Redis OSS and Serverless Memcached only",
           args: {
             name: "string",
           },
         },
         {
           name: "--next-token",
           description:
-            "An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis only",
+            "An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only",
           args: {
             name: "string",
           },
         },
         {
           name: "--max-results",
           description:
-            "The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis only.The default is 50. The Validation Constraints are a maximum of 50",
+            "The maximum number of records to include in the response. If more records exist than the specified max-results value, a marker is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50",
           args: {
             name: "integer",
           },
@@ -3329,7 +3330,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "describe-snapshots",
       description:
-        "Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis only",
+        "Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis OSS only",
       options: [
         {
           name: "--replication-group-id",
@@ -3460,7 +3461,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--engine",
           description:
-            "The Elasticache engine to which the update applies. Either Redis or Memcached",
+            "The ElastiCache engine to which the update applies. Either Redis OSS or Memcached",
           args: {
             name: "string",
           },
@@ -3632,7 +3633,7 @@ const completionSpec: Fig.Spec = {
       options: [
         {
           name: "--engine",
-          description: "The Redis engine",
+          description: "The Redis OSS engine",
           args: {
             name: "string",
           },
@@ -3760,12 +3761,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "export-serverless-cache-snapshot",
       description:
-        "Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis only",
+        "Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only",
      options: [
        {
          name: "--serverless-cache-snapshot-name",
          description:
-            "The identifier of the serverless cache snapshot to be exported to S3. Available for Redis only",
+            "The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only",
           args: {
             name: "string",
           },
         },
         {
           name: "--s3-bucket-name",
           description:
-            "Name of the Amazon S3 bucket to export the snapshot to. 
The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis only",
+            "Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in the same region as the snapshot. Available for Redis OSS only",
           args: {
             name: "string",
           },
@@ -3901,7 +3902,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "increase-replica-count",
       description:
-        "Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time",
+        "Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time",
       options: [
         {
           name: "--replication-group-id",
@@ -3914,7 +3915,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--new-replica-count",
           description:
-            "The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups",
+            "The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups",
           args: {
             name: "integer",
           },
@@ -3922,7 +3923,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--replica-configuration",
           description:
-            "A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones",
+            "A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones",
           args: {
             name: "list",
           },
@@ -3959,7 +3960,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "list-allowed-node-type-modifications",
       description:
-        "Lists all available node types that you can scale your Redis cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation",
+        "Lists all available node types to which you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation",
       options: [
         {
           name: "--cache-cluster-id",
@@ -4044,7 +4045,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--num-cache-nodes",
           description:
-            "The number of cache nodes that the cluster should have. 
If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster", + "The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. 
The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster", args: { name: "integer", }, @@ -4142,12 +4143,12 @@ const completionSpec: Fig.Spec = { { name: "--auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--no-auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--snapshot-retention-limit", @@ -4184,7 +4185,7 @@ const completionSpec: Fig.Spec = { { name: "--auth-token-update-strategy", description: - "Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH", + "Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH", args: { name: "string", }, @@ -4199,7 +4200,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-discovery", description: - "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -4391,7 +4392,7 @@ const completionSpec: Fig.Spec = { { name: "modify-replication-group", description: - "Modifies the settings for a replication group. This is limited to Redis 7 and newer. Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis only", + "Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. 
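// A sketch of the cancellation rule described above: re-submitting the cluster's
// current node count cancels any pending add/remove operations. Assumes
// @aws-sdk/client-elasticache; the helper and error message are illustrative only.
import {
  ElastiCacheClient,
  DescribeCacheClustersCommand,
  ModifyCacheClusterCommand,
} from "@aws-sdk/client-elasticache";

async function cancelPendingNodeChanges(cacheClusterId: string): Promise<void> {
  const client = new ElastiCacheClient({});
  const { CacheClusters } = await client.send(
    new DescribeCacheClustersCommand({ CacheClusterId: cacheClusterId })
  );
  const currentCount = CacheClusters?.[0]?.NumCacheNodes;
  if (currentCount === undefined) {
    throw new Error(`cache cluster ${cacheClusterId} not found`);
  }
  // Setting NumCacheNodes equal to the current count cancels pending requests.
  await client.send(
    new ModifyCacheClusterCommand({
      CacheClusterId: cacheClusterId,
      NumCacheNodes: currentCount,
      ApplyImmediately: true,
    })
  );
}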
Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis OSS only", options: [ { name: "--replication-group-id", @@ -4419,7 +4420,7 @@ const completionSpec: Fig.Spec = { { name: "--snapshotting-cluster-id", description: - "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups", + "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups", args: { name: "string", }, @@ -4518,12 +4519,12 @@ const completionSpec: Fig.Spec = { { name: "--auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--no-auto-minor-version-upgrade", description: - "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", + "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions", }, { name: "--snapshot-retention-limit", @@ -4560,7 +4561,7 @@ const completionSpec: Fig.Spec = { { name: "--auth-token-update-strategy", description: - "Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH", + "Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH", args: { name: "string", }, @@ -4601,7 +4602,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-discovery", description: - "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", + "The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system", args: { name: "string", }, @@ -4619,7 +4620,7 @@ const completionSpec: Fig.Spec = { { name: "--transit-encryption-mode", description: - "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. 
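// A sketch of the ROTATE-then-SET AUTH token update strategy described above, assuming
// @aws-sdk/client-elasticache. The group ID and token values are hypothetical.
import {
  ElastiCacheClient,
  ModifyReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

async function rotateAuthToken(groupId: string, newToken: string): Promise<void> {
  const cache = new ElastiCacheClient({});
  // Step 1: ROTATE, so the server temporarily accepts both old and new tokens.
  await cache.send(
    new ModifyReplicationGroupCommand({
      ReplicationGroupId: groupId,
      AuthToken: newToken,
      AuthTokenUpdateStrategy: "ROTATE",
      ApplyImmediately: true,
    })
  );
  // Step 2: once every client uses the new token, SET retires the old one.
  await cache.send(
    new ModifyReplicationGroupCommand({
      ReplicationGroupId: groupId,
      AuthToken: newToken,
      AuthTokenUpdateStrategy: "SET",
      ApplyImmediately: true,
    })
  );
}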
You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required", + "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required", args: { name: "string", }, @@ -4627,7 +4628,7 @@ const completionSpec: Fig.Spec = { { name: "--cluster-mode", description: - "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled", + "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled", args: { name: "string", }, @@ -4659,7 +4660,7 @@ const completionSpec: Fig.Spec = { { name: "--replication-group-id", description: - "The name of the Redis (cluster mode enabled) cluster (replication group) on which the shards are to be configured", + "The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured", args: { name: "string", }, @@ -4693,7 +4694,7 @@ const completionSpec: Fig.Spec = { { name: "--node-groups-to-remove", description: - "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster", + "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. 
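// A sketch of the two-step in-transit-encryption migration described above (preferred
// first, required only after all clients speak TLS), assuming @aws-sdk/client-elasticache.
import {
  ElastiCacheClient,
  ModifyReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

async function migrateToTransitEncryption(groupId: string): Promise<void> {
  const client = new ElastiCacheClient({});
  // Step 1: accept both encrypted and unencrypted connections while clients migrate.
  await client.send(
    new ModifyReplicationGroupCommand({
      ReplicationGroupId: groupId,
      TransitEncryptionEnabled: true,
      TransitEncryptionMode: "preferred",
      ApplyImmediately: true,
    })
  );
  // Step 2 (run later, after client migration): allow encrypted connections only.
  await client.send(
    new ModifyReplicationGroupCommand({
      ReplicationGroupId: groupId,
      TransitEncryptionMode: "required",
      ApplyImmediately: true,
    })
  );
}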
ElastiCache (Redis OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster",
        args: {
          name: "list",
        },
@@ -4701,7 +4702,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--node-groups-to-retain",
        description:
-          "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache for Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster",
+          "If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster",
        args: {
          name: "list",
        },
@@ -4755,17 +4756,17 @@ const completionSpec: Fig.Spec = {
      {
        name: "--remove-user-group",
        description:
-          "The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL",
+          "The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL",
      },
      {
        name: "--no-remove-user-group",
        description:
-          "The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL",
+          "The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL",
      },
      {
        name: "--user-group-id",
        description:
-          "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL - the existing UserGroup is not removed",
+          "The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not removed",
        args: {
          name: "string",
        },
@@ -4781,7 +4782,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--snapshot-retention-limit",
        description:
-          "The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days",
+          "The number of days for which ElastiCache retains automatic snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days",
        args: {
          name: "integer",
        },
@@ -4789,7 +4790,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--daily-snapshot-time",
        description:
-          "The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed",
+          "The daily time during which ElastiCache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. The default is NULL, i.e.
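// A sketch of shrinking a cluster-mode-enabled replication group as described above:
// when NodeGroupCount goes down, the shards to drop (or retain) must be named.
// Assumes @aws-sdk/client-elasticache; the IDs and counts are hypothetical.
import {
  ElastiCacheClient,
  ModifyReplicationGroupShardConfigurationCommand,
} from "@aws-sdk/client-elasticache";

async function removeShard(): Promise<void> {
  const client = new ElastiCacheClient({});
  await client.send(
    new ModifyReplicationGroupShardConfigurationCommand({
      ReplicationGroupId: "my-clustered-group", // hypothetical
      NodeGroupCount: 2, // target number of node groups (shards)
      NodeGroupsToRemove: ["0003"], // shard IDs to drain and remove
      ApplyImmediately: true,
    })
  );
}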
the existing snapshot time configured for the cluster is not removed", args: { name: "string", }, @@ -4927,7 +4928,7 @@ const completionSpec: Fig.Spec = { { name: "purchase-reserved-cache-nodes-offering", description: - "Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis or Managing Costs with Reserved Nodes for Memcached", + "Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached", options: [ { name: "--reserved-cache-nodes-offering-id", @@ -5022,7 +5023,7 @@ const completionSpec: Fig.Spec = { { name: "reboot-cache-cluster", description: - "Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis (cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled) clusters. If you make changes to parameters that require a Redis (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process", + "Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled) clusters. If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process", options: [ { name: "--cache-cluster-id", @@ -5211,7 +5212,7 @@ const completionSpec: Fig.Spec = { { name: "--customer-node-endpoint-list", description: - "List of endpoints from which data should be migrated. For Redis (cluster mode disabled), list should have only one element", + "List of endpoints from which data should be migrated. For Redis OSS (cluster mode disabled), list should have only one element", args: { name: "list", }, @@ -5238,7 +5239,7 @@ const completionSpec: Fig.Spec = { { name: "test-failover", description: - "Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. 
Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide",
+        "Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide",
      options: [
        {
          name: "--replication-group-id",
@@ -5362,12 +5363,12 @@ const completionSpec: Fig.Spec = {
      {
        name: "--show-cache-clusters-not-in-replication-groups",
        description:
-          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group.
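// A sketch of the TestFailover call described above, assuming
// @aws-sdk/client-elasticache. One shard per call; the IDs are hypothetical.
import {
  ElastiCacheClient,
  TestFailoverCommand,
} from "@aws-sdk/client-elasticache";

async function exerciseFailover(): Promise<void> {
  const client = new ElastiCacheClient({});
  await client.send(
    new TestFailoverCommand({
      ReplicationGroupId: "my-replication-group", // hypothetical
      NodeGroupId: "0001", // the shard whose automatic failover is being tested
    })
  );
  // Completion is observable via the replication group and cache cluster events
  // listed above (DescribeEvents or the Amazon ElastiCache console).
}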
In practice, this mean Memcached and single node Redis clusters",
+          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
      },
      {
        name: "--no-show-cache-clusters-not-in-replication-groups",
        description:
-          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters",
+          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
      },
      {
        name: "--cli-input-json",
@@ -5454,12 +5455,12 @@ const completionSpec: Fig.Spec = {
      {
        name: "--show-cache-clusters-not-in-replication-groups",
        description:
-          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters",
+          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
      },
      {
        name: "--no-show-cache-clusters-not-in-replication-groups",
        description:
-          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters",
+          "An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters",
      },
      {
        name: "--cli-input-json",
diff --git a/src/aws/elbv2.ts b/src/aws/elbv2.ts
index 4c528b2197ae..0501ea293fe5 100644
--- a/src/aws/elbv2.ts
+++ b/src/aws/elbv2.ts
@@ -678,6 +678,43 @@ const completionSpec: Fig.Spec = {
       },
     ],
   },
+  {
+    name: "delete-shared-trust-store-association",
+    description: "Deletes a shared trust store association",
+    options: [
+      {
+        name: "--trust-store-arn",
+        description: "The Amazon Resource Name (ARN) of the trust store",
+        args: {
+          name: "string",
+        },
+      },
+      {
+        name: "--resource-arn",
+        description: "The Amazon Resource Name (ARN) of the resource",
+        args: {
+          name: "string",
+        },
+      },
+      {
+        name: "--cli-input-json",
+        description:
+          "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally",
+        args: {
+          name: "string",
+        },
+      },
+      {
+        name: "--generate-cli-skeleton",
+        description:
+          "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``.
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-target-group", description: @@ -1371,7 +1408,7 @@ const completionSpec: Fig.Spec = { }, { name: "--include", - description: "Used to inclue anomaly detection information", + description: "Used to include anomaly detection information", args: { name: "list", }, @@ -1444,7 +1481,7 @@ const completionSpec: Fig.Spec = { { name: "describe-trust-store-revocations", description: - "Describes the revocation files in use by the specified trust store arn, or revocation ID", + "Describes the revocation files in use by the specified trust store or revocation files", options: [ { name: "--trust-store-arn", @@ -1497,8 +1534,7 @@ const completionSpec: Fig.Spec = { }, { name: "describe-trust-stores", - description: - "Describes all trust stores for a given account by trust store arn\u2019s or name", + description: "Describes all trust stores for the specified account", options: [ { name: "--trust-store-arns", @@ -1548,6 +1584,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-resource-policy", + description: "Retrieves the resource policy for a specified resource", + options: [ + { + name: "--resource-arn", + description: "The Amazon Resource Name (ARN) of the resource", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-trust-store-ca-certificates-bundle", description: @@ -1928,7 +1994,8 @@ const completionSpec: Fig.Spec = { }, { name: "modify-trust-store", - description: "Update the ca certificate bundle for a given trust store", + description: + "Update the ca certificate bundle for the specified trust store", options: [ { name: "--trust-store-arn", @@ -2147,7 +2214,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. 
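// A sketch of the trust-store sharing operations added in this diff
// (get-resource-policy and delete-shared-trust-store-association), assuming
// @aws-sdk/client-elastic-load-balancing-v2. All ARNs are hypothetical placeholders.
import {
  ElasticLoadBalancingV2Client,
  GetResourcePolicyCommand,
  DeleteSharedTrustStoreAssociationCommand,
} from "@aws-sdk/client-elastic-load-balancing-v2";

async function inspectAndDetachSharedTrustStore(): Promise<void> {
  const elbv2 = new ElasticLoadBalancingV2Client({});
  const trustStoreArn =
    "arn:aws:elasticloadbalancing:us-east-1:111122223333:truststore/shared-store/73e2d6bc24d8a067"; // hypothetical

  // Read the resource policy that grants other principals access to the store.
  const { Policy } = await elbv2.send(
    new GetResourcePolicyCommand({ ResourceArn: trustStoreArn })
  );
  console.log(Policy);

  // Drop the association between the shared trust store and a consuming resource.
  await elbv2.send(
    new DeleteSharedTrustStoreAssociationCommand({
      TrustStoreArn: trustStoreArn,
      ResourceArn:
        "arn:aws:elasticloadbalancing:us-east-1:444455556666:listener/app/my-alb/1a2b3c4d/5e6f7a8b", // hypothetical
    })
  );
}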
The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", args: { name: "string", }, @@ -2542,7 +2609,7 @@ const completionSpec: Fig.Spec = { }, { name: "--include", - description: "Used to inclue anomaly detection information", + description: "Used to include anomaly detection information", args: { name: "list", }, @@ -2587,7 +2654,7 @@ const completionSpec: Fig.Spec = { }, { name: "--include", - description: "Used to inclue anomaly detection information", + description: "Used to include anomaly detection information", args: { name: "list", }, diff --git a/src/aws/iotsitewise.ts b/src/aws/iotsitewise.ts index 847bb4a6c1e5..4af421ee850d 100644 --- a/src/aws/iotsitewise.ts +++ b/src/aws/iotsitewise.ts @@ -525,7 +525,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--asset-model-name", - description: "A unique, friendly name for the asset model", + description: "A unique name for the asset model", args: { name: "string", }, @@ -556,7 +556,7 @@ const completionSpec: Fig.Spec = { { name: "--asset-model-composite-models", description: - "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see ", + "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. 
For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide", args: { name: "list", }, @@ -666,7 +666,7 @@ const completionSpec: Fig.Spec = { }, { name: "--asset-model-composite-model-name", - description: "A unique, friendly name for the composite model", + description: "A unique name for the composite model", args: { name: "string", }, @@ -689,7 +689,8 @@ const completionSpec: Fig.Spec = { }, { name: "--composed-asset-model-id", - description: "The ID of a composite model on this asset", + description: + "The ID of a component model which is reused to create this composite model", args: { name: "string", }, @@ -697,7 +698,7 @@ const completionSpec: Fig.Spec = { { name: "--asset-model-composite-model-properties", description: - "The property definitions of the composite model. For more information, see . You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide", + "The property definitions of the composite model. For more information, see Inline custom composite models in the IoT SiteWise User Guide. You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide", args: { name: "list", }, @@ -879,7 +880,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--gateway-name", - description: "A unique, friendly name for the gateway", + description: "A unique name for the gateway", args: { name: "string", }, @@ -4373,7 +4374,7 @@ const completionSpec: Fig.Spec = { { name: "update-asset-model", description: - "Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide. This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel. If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property", + "Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide. If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. You can\u2019t change the type or data type of an existing property. To replace an existing asset model property with a new one with the same name, do the following: Submit an UpdateAssetModel request with the entire existing property removed. Submit a second UpdateAssetModel request that includes the new property. 
The new asset property will have the same name as the previous one and IoT SiteWise will generate a new unique id", options: [ { name: "--asset-model-id", @@ -4385,7 +4386,7 @@ const completionSpec: Fig.Spec = { }, { name: "--asset-model-name", - description: "A unique, friendly name for the asset model", + description: "A unique name for the asset model", args: { name: "string", }, @@ -4416,7 +4417,7 @@ const completionSpec: Fig.Spec = { { name: "--asset-model-composite-models", description: - "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see ", + "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide", args: { name: "list", }, @@ -4492,7 +4493,7 @@ const completionSpec: Fig.Spec = { }, { name: "--asset-model-composite-model-name", - description: "A unique, friendly name for the composite model", + description: "A unique name for the composite model", args: { name: "string", }, @@ -4508,7 +4509,7 @@ const completionSpec: Fig.Spec = { { name: "--asset-model-composite-model-properties", description: - "The property definitions of the composite model. For more information, see . You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide", + "The property definitions of the composite model. For more information, see Inline custom composite models in the IoT SiteWise User Guide. You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide", args: { name: "list", }, @@ -4677,7 +4678,7 @@ const completionSpec: Fig.Spec = { }, { name: "--gateway-name", - description: "A unique, friendly name for the gateway", + description: "A unique name for the gateway", args: { name: "string", }, diff --git a/src/aws/lexv2-models.ts b/src/aws/lexv2-models.ts index f87ebfc54682..f8f42a3bcbb1 100644 --- a/src/aws/lexv2-models.ts +++ b/src/aws/lexv2-models.ts @@ -806,7 +806,7 @@ const completionSpec: Fig.Spec = { { name: "create-resource-policy-statement", description: - "Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account access", + "Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account access. 
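// A sketch of the two-request property replacement described above for UpdateAssetModel,
// assuming @aws-sdk/client-iotsitewise. Shapes are abbreviated, the property name is a
// hypothetical example, and the model may need to return to ACTIVE between the calls.
import {
  IoTSiteWiseClient,
  DescribeAssetModelCommand,
  UpdateAssetModelCommand,
} from "@aws-sdk/client-iotsitewise";

async function removePropertyBeforeReplacing(
  assetModelId: string,
  propertyName: string
): Promise<void> {
  const sitewise = new IoTSiteWiseClient({});
  const model = await sitewise.send(
    new DescribeAssetModelCommand({ assetModelId })
  );

  // Request 1: resubmit the model with the old property removed. A later request
  // (not shown) adds the same-named replacement, which receives a new unique ID.
  await sitewise.send(
    new UpdateAssetModelCommand({
      assetModelId,
      assetModelName: model.assetModelName ?? "",
      assetModelProperties: (model.assetModelProperties ?? []).filter(
        (property) => property.name !== propertyName
      ),
      assetModelHierarchies: model.assetModelHierarchies,
    })
  );
}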
You need to add the CreateResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API", options: [ { name: "--resource-arn", @@ -1587,7 +1587,7 @@ const completionSpec: Fig.Spec = { { name: "delete-resource-policy-statement", description: - "Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception", + "Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception. You need to add the DeleteResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API", options: [ { name: "--resource-arn", diff --git a/src/aws/medical-imaging.ts b/src/aws/medical-imaging.ts index 635768b997c8..0e6efca6fce1 100644 --- a/src/aws/medical-imaging.ts +++ b/src/aws/medical-imaging.ts @@ -28,6 +28,16 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--force", + description: + "Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level metadata are mismatched across the sourceImageSet and destinationImageSet", + }, + { + name: "--no-force", + description: + "Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level metadata are mismatched across the sourceImageSet and destinationImageSet", + }, { name: "--cli-input-json", description: @@ -861,6 +871,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--force", + description: + "Setting this flag will force the UpdateImageSetMetadata operation for the following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, and Tag.StudyID Adding, removing, or updating private tags for an individual SOP Instance", + }, + { + name: "--no-force", + description: + "Setting this flag will force the UpdateImageSetMetadata operation for the following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, and Tag.StudyID Adding, removing, or updating private tags for an individual SOP Instance", + }, { name: "--update-image-set-metadata-updates", description: "Update image set metadata updates", diff --git a/src/aws/memorydb.ts b/src/aws/memorydb.ts index 057acddf07c6..1b27e701ab14 100644 --- a/src/aws/memorydb.ts +++ b/src/aws/memorydb.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "memorydb", description: - "MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis\u2019 flexible and friendly data structures, APIs, and commands", + "MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. 
It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS\u2019 flexible and friendly data structures, APIs, and commands",
   subcommands: [
     {
       name: "batch-update-cluster",
@@ -309,7 +309,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--engine-version",
        description:
-          "The version number of the Redis engine to be used for the cluster",
+          "The version number of the Redis OSS engine to be used for the cluster",
        args: {
          name: "string",
        },
@@ -915,11 +915,11 @@ const completionSpec: Fig.Spec = {
     },
     {
       name: "describe-engine-versions",
-      description: "Returns a list of the available Redis engine versions",
+      description: "Returns a list of the available Redis OSS engine versions",
       options: [
         {
           name: "--engine-version",
-          description: "The Redis engine version",
+          description: "The Redis OSS engine version",
          args: {
            name: "string",
          },
diff --git a/src/aws/network-firewall.ts b/src/aws/network-firewall.ts
index cf5ce6c16a55..0e8ee88af9dc 100644
--- a/src/aws/network-firewall.ts
+++ b/src/aws/network-firewall.ts
@@ -411,7 +411,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "create-tls-inspection-configuration",
       description:
-        "Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains Certificate Manager certificate associations between and the scope configurations that Network Firewall uses to decrypt and re-encrypt traffic traveling through your firewall. After you create a TLS inspection configuration, you can associate it with a new firewall policy. To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide",
+        "Creates a Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall. To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.
For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide", options: [ { name: "--tls-inspection-configuration-name", diff --git a/src/aws/pinpoint-sms-voice-v2.ts b/src/aws/pinpoint-sms-voice-v2.ts index bbf2958bca4d..05ce31c07030 100644 --- a/src/aws/pinpoint-sms-voice-v2.ts +++ b/src/aws/pinpoint-sms-voice-v2.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "pinpoint-sms-voice-v2", description: - "Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference. This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The Amazon Pinpoint SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to Amazon Pinpoint SMS, it's also helpful to review the Amazon Pinpoint SMS User Guide. The Amazon Pinpoint Developer Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate Amazon Pinpoint functionality into mobile apps and other types of applications. The guide also provides key information, such as Amazon Pinpoint integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Pinpoint SMS and Voice, version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", + "Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference. This guide provides information about AWS End User Messaging SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas. Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API. If you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the AWS End User Messaging SMS User Guide. 
The AWS End User Messaging SMS User Guide provides tutorials, code samples, and procedures that demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The AWS End User Messaging SMS and Voice version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", subcommands: [ { name: "associate-origination-identity", @@ -147,7 +147,7 @@ const completionSpec: Fig.Spec = { { name: "create-event-destination", description: - "Creates a new event destination in a configuration set. An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Kinesis Data Firehose destination", + "Creates a new event destination in a configuration set. An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Firehose destination", options: [ { name: "--configuration-set-name", @@ -167,7 +167,7 @@ const completionSpec: Fig.Spec = { { name: "--matching-event-types", description: - 'An array of event types that determine which events to log. If "ALL" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is not supported', + 'An array of event types that determine which events to log. If "ALL" is used, then AWS End User Messaging SMS and Voice logs every event type. 
The TEXT_SENT event type is not supported',
        args: {
          name: "list",
        },
@@ -183,7 +183,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--kinesis-firehose-destination",
        description:
-          "An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose",
+          "An object that contains information about an event destination for logging to Amazon Data Firehose",
        args: {
          name: "structure",
        },
@@ -226,7 +226,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "create-opt-out-list",
       description:
-        'Creates a new opt-out list. If the opt-out list name already exists, an error is returned. An opt-out list is a list of phone numbers that are opted out, meaning you can\'t send SMS or voice messages to them. If end user replies with the keyword "STOP," an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the Amazon Pinpoint User Guide',
+        'Creates a new opt-out list. If the opt-out list name already exists, an error is returned. An opt-out list is a list of phone numbers that are opted out, meaning you can\'t send SMS or voice messages to them. If an end user replies with the keyword "STOP," an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the AWS End User Messaging SMS User Guide',
       options: [
         {
           name: "--opt-out-list-name",
@@ -278,7 +278,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--origination-identity",
        description:
-          "The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn",
+          "The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity",
        args: {
          name: "string",
        },
@@ -294,7 +294,7 @@ const completionSpec: Fig.Spec = {
      {
        name: "--message-type",
        description:
-          "The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive",
+          "The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. After the pool is created the MessageType can't be changed",
        args: {
          name: "string",
        },
@@ -776,7 +776,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "delete-keyword",
       description:
-        'Deletes an existing keyword from an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message.
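// A sketch of creating a CloudWatch Logs event destination as described above, assuming
// @aws-sdk/client-pinpoint-sms-voice-v2. Names and ARNs are hypothetical; "ALL" is the
// catch-all matching event type mentioned in the description (TEXT_SENT is not allowed).
import {
  PinpointSMSVoiceV2Client,
  CreateEventDestinationCommand,
} from "@aws-sdk/client-pinpoint-sms-voice-v2";

async function createLoggingEventDestination(): Promise<void> {
  const sms = new PinpointSMSVoiceV2Client({});
  await sms.send(
    new CreateEventDestinationCommand({
      ConfigurationSetName: "my-configuration-set", // hypothetical
      EventDestinationName: "all-events-to-cloudwatch",
      MatchingEventTypes: ["ALL"],
      CloudWatchLogsDestination: {
        IamRoleArn: "arn:aws:iam::111122223333:role/sms-voice-logging", // hypothetical
        LogGroupArn:
          "arn:aws:logs:us-east-1:111122223333:log-group:sms-voice-events", // hypothetical
      },
    })
  );
}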
Keywords "HELP" and "STOP" can\'t be deleted or modified', + 'Deletes an existing keyword from an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. Keywords "HELP" and "STOP" can\'t be deleted or modified', options: [ { name: "--origination-identity", @@ -1073,7 +1073,7 @@ const completionSpec: Fig.Spec = { { name: "delete-text-message-spend-limit-override", description: - "Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide", + "Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide", options: [ { name: "--cli-input-json", @@ -1128,7 +1128,7 @@ const completionSpec: Fig.Spec = { { name: "delete-voice-message-spend-limit-override", description: - "Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide", + "Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide", options: [ { name: "--cli-input-json", @@ -1152,7 +1152,7 @@ const completionSpec: Fig.Spec = { { name: "describe-account-attributes", description: - "Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request. New Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse", + "Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request. New accounts are placed into an SMS or voice sandbox. 
The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse",
       options: [
         {
           name: "--next-token",
@@ -1216,7 +1216,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "describe-account-limits",
       description:
-        "Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value. When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide",
+        "Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value. When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Quotas in the AWS End User Messaging SMS User Guide",
       options: [
         {
           name: "--next-token",
@@ -1360,7 +1360,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "describe-keywords",
       description:
-        "Describes the specified keywords or all keywords on your origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you specify a keyword that isn't valid, an error is returned",
+        "Describes the specified keywords or all keywords on your origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. If you specify a keyword that isn't valid, an error is returned",
       options: [
         {
           name: "--origination-identity",
@@ -2503,7 +2503,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "describe-spend-limits",
       description:
-        "Describes the current Amazon Pinpoint monthly spend limits for sending voice and text messages. When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS spending quota for Amazon Pinpoint in the Amazon Pinpoint User Guide",
+        "Describes the current monthly spend limits for sending voice and text messages. When you establish an Amazon Web Services account, the account has an initial monthly spend limit in a given Region. 
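
// Editor's note: a small sketch of paging through DescribeSpendLimits with
// NextToken, matching the quota and spend-limit behavior described above. The
// client and command come from the AWS SDK v3; output field names are assumed
// from the API shape.
import {
  PinpointSMSVoiceV2Client,
  DescribeSpendLimitsCommand,
} from "@aws-sdk/client-pinpoint-sms-voice-v2";

const client = new PinpointSMSVoiceV2Client({});
let nextToken: string | undefined;

do {
  const page = await client.send(
    new DescribeSpendLimitsCommand({ NextToken: nextToken })
  );
  for (const limit of page.SpendLimits ?? []) {
    // EnforcedLimit is your current cap; MaxLimit is controlled by AWS.
    console.log(limit.Name, limit.EnforcedLimit, limit.MaxLimit);
  }
  nextToken = page.NextToken;
} while (nextToken);
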
For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging SMS User Guide", options: [ { name: "--next-token", @@ -3006,7 +3006,7 @@ const completionSpec: Fig.Spec = { { name: "put-keyword", description: - "Creates or updates a keyword configuration on an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you specify a keyword that isn't valid, an error is returned", + "Creates or updates a keyword configuration on an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. If you specify a keyword that isn't valid, an error is returned", options: [ { name: "--origination-identity", @@ -3229,7 +3229,7 @@ const completionSpec: Fig.Spec = { { name: "request-phone-number", description: - "Request an origination phone number for use in your account. For more information on phone number request see Requesting a number in the Amazon Pinpoint User Guide", + "Request an origination phone number for use in your account. For more information on phone number request see Request a phone number in the AWS End User Messaging SMS User Guide", options: [ { name: "--iso-country-code", @@ -3589,7 +3589,7 @@ const completionSpec: Fig.Spec = { { name: "send-text-message", description: - "Creates a new text message and sends it to a recipient's phone number. SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information, see Message Parts per Second (MPS) limits in the Amazon Pinpoint User Guide", + "Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked. SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information about MPS, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide", options: [ { name: "--destination-phone-number", @@ -3640,7 +3640,7 @@ const completionSpec: Fig.Spec = { { name: "--max-price", description: - "The maximum amount that you want to spend, in US dollars, per each text message part. A text message can contain multiple parts", + "The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice, the message is not sent and an error is returned", args: { name: "string", }, @@ -3648,7 +3648,7 @@ const completionSpec: Fig.Spec = { { name: "--time-to-live", description: - "How long the text message is valid for. 
By default this is 72 hours",
+        "How long the text message is valid for, in seconds. By default this is 72 hours. If the message isn't handed off before the TTL expires, we stop attempting to hand off the message and return a TTL_EXPIRED event",
       args: {
         name: "integer",
       },
@@ -3664,7 +3664,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "--destination-country-parameters",
       description:
-        "This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India",
+        "This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India. IN_ENTITY_ID The entity ID or Principal Entity (PE) ID that you received after completing the sender ID registration process. IN_TEMPLATE_ID The template ID that you received after completing the sender ID registration process. Make sure that the Template ID that you specify matches your message template exactly. If your message doesn't match the template that you provided during the registration process, the mobile carriers might reject your message",
       args: {
         name: "map",
       },
@@ -3672,12 +3672,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "--dry-run",
       description:
-        "When set to true, the message is checked and validated, but isn't sent to the end recipient",
+        "When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit, then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide",
     },
     {
       name: "--no-dry-run",
       description:
-        "When set to true, the message is checked and validated, but isn't sent to the end recipient",
+        "When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit, then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide",
     },
     {
       name: "--protect-configuration-id",
@@ -3708,7 +3708,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "send-voice-message",
       description:
-        "Allows you to send a request that sends a voice message through Amazon Pinpoint. This operation uses Amazon Polly to convert a text script into a voice message",
+        "Allows you to send a request that sends a voice message. This operation uses Amazon Polly to convert a text script into a voice message",
       options: [
         {
           name: "--destination-phone-number",
@@ -4053,7 +4053,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "tag-resource",
       description:
-        "Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, version 2 resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. 
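
// Editor's note: a sketch of the SendTextMessage behavior described above,
// exercising DryRun validation and an explicit TimeToLive. Values are
// placeholders; one invocation sends to exactly one recipient.
import {
  PinpointSMSVoiceV2Client,
  SendTextMessageCommand,
} from "@aws-sdk/client-pinpoint-sms-voice-v2";

const client = new PinpointSMSVoiceV2Client({});

await client.send(
  new SendTextMessageCommand({
    DestinationPhoneNumber: "+12065550100", // placeholder
    OriginationIdentity: "pool-11111111111111111111111111111111", // placeholder PoolId
    MessageBody: "Your verification code is 123456",
    MessageType: "TRANSACTIONAL",
    TimeToLive: 600, // seconds; a TTL_EXPIRED event is returned if not handed off in time
    DryRun: true, // validate only -- nothing is delivered and nothing is billed
  })
);
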
For more information about tags, see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide",
+        "Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tags in the AWS End User Messaging SMS User Guide",
       options: [
         {
           name: "--resource-arn",
@@ -4092,7 +4092,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "untag-resource",
       description:
-        "Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide",
+        "Removes the association of the specified tags from a resource. For more information on tags see Tags in the AWS End User Messaging SMS User Guide",
       options: [
         {
           name: "--resource-arn",
@@ -4131,7 +4131,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "update-event-destination",
       description:
-        "Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event destination. You may want to update an event destination to change its matching event types or updating the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS",
+        "Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination. You may want to update an event destination to change its matching event types or to update the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Firehose, and Amazon SNS",
       options: [
         {
           name: "--configuration-set-name",
@@ -4175,7 +4175,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "--kinesis-firehose-destination",
       description:
-        "An object that contains information about an event destination for logging to Kinesis Data Firehose",
+        "An object that contains information about an event destination for logging to Firehose",
       args: {
         name: "structure",
       },
@@ -4248,12 +4248,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "--self-managed-opt-outs-enabled",
       description:
-        "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests",
+        "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests",
     },
     {
       name: "--no-self-managed-opt-outs-enabled",
       description:
-        "By default this is set to false. 
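
// Editor's note: a minimal sketch of the tagging calls described above. The ARN
// is a placeholder; each resource accepts at most 50 tags with unique keys.
import {
  PinpointSMSVoiceV2Client,
  TagResourceCommand,
  UntagResourceCommand,
} from "@aws-sdk/client-pinpoint-sms-voice-v2";

const client = new PinpointSMSVoiceV2Client({});
const resourceArn =
  "arn:aws:sms-voice:us-east-1:111122223333:pool/example"; // placeholder ARN

// Re-using an existing key overwrites its value rather than adding a duplicate.
await client.send(
  new TagResourceCommand({
    ResourceArn: resourceArn,
    Tags: [{ Key: "team", Value: "messaging" }],
  })
);

await client.send(
  new UntagResourceCommand({ ResourceArn: resourceArn, TagKeys: ["team"] })
);
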
When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", + "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", }, { name: "--opt-out-list-name", @@ -4333,12 +4333,12 @@ const completionSpec: Fig.Spec = { { name: "--self-managed-opt-outs-enabled", description: - "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", + "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", }, { name: "--no-self-managed-opt-outs-enabled", description: - "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", + "By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests", }, { name: "--opt-out-list-name", @@ -4448,7 +4448,7 @@ const completionSpec: Fig.Spec = { { name: "--country-rule-set-updates", description: - "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide", + "A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. 
For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide",
       args: {
         name: "map",
       },
diff --git a/src/aws/rolesanywhere.ts b/src/aws/rolesanywhere.ts
index b572a002f5fd..a628302c9e4f 100644
--- a/src/aws/rolesanywhere.ts
+++ b/src/aws/rolesanywhere.ts
@@ -8,6 +8,16 @@ const completionSpec: Fig.Spec = {
       description:
         "Creates a profile, a list of the roles that Roles Anywhere service is trusted to assume. You use profiles to intersect permissions with IAM managed policies. Required permissions: rolesanywhere:CreateProfile",
       options: [
+        {
+          name: "--accept-role-session-name",
+          description:
+            "Used to determine if a custom role session name will be accepted in a temporary credential request",
+        },
+        {
+          name: "--no-accept-role-session-name",
+          description:
+            "Used to determine if a custom role session name will be accepted in a temporary credential request",
+        },
         {
           name: "--duration-seconds",
           description:
@@ -1176,6 +1186,16 @@ const completionSpec: Fig.Spec = {
       description:
         "Updates a profile, a list of the roles that IAM Roles Anywhere service is trusted to assume. You use profiles to intersect permissions with IAM managed policies. Required permissions: rolesanywhere:UpdateProfile",
       options: [
+        {
+          name: "--accept-role-session-name",
+          description:
+            "Used to determine if a custom role session name will be accepted in a temporary credential request",
+        },
+        {
+          name: "--no-accept-role-session-name",
+          description:
+            "Used to determine if a custom role session name will be accepted in a temporary credential request",
+        },
         {
           name: "--duration-seconds",
           description:
diff --git a/src/aws/stepfunctions.ts b/src/aws/stepfunctions.ts
index 0aad602fc819..c87d7de4148a 100644
--- a/src/aws/stepfunctions.ts
+++ b/src/aws/stepfunctions.ts
@@ -1,7 +1,7 @@
 const completionSpec: Fig.Spec = {
   name: "stepfunctions",
   description:
-    "Step Functions Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows. You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn",
+    "Step Functions coordinates the components of distributed applications and microservices using visual workflows. 
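
// Editor's note: the paired --accept-role-session-name/--no-accept-role-session-name
// entries above follow this spec's usual pattern for AWS CLI booleans: two
// flag-style Fig.Option entries (no args) sharing one description. A generic
// sketch of that pattern, with hypothetical option names:
const booleanFlagPair: Fig.Option[] = [
  {
    name: "--example-feature-enabled", // hypothetical flag
    description: "Enables the example feature",
  },
  {
    name: "--no-example-feature-enabled", // hypothetical negated twin
    description: "Disables the example feature",
  },
];
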
You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide . If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn", subcommands: [ { name: "create-activity", @@ -24,6 +24,13 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--encryption-configuration", + description: "Settings to configure server-side encryption", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -46,7 +53,7 @@ const completionSpec: Fig.Spec = { { name: "create-state-machine", description: - "Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide. If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine. This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes. CreateStateMachine is an idempotent API. Subsequent requests won\u2019t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, and TracingConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different", + "Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide. If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine. For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines. 
You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine. The execution history and state machine definition will be encrypted with the key applied to the State Machine. This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes. CreateStateMachine is an idempotent API. Subsequent requests won\u2019t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, TracingConfiguration, and EncryptionConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different",
       options: [
         {
           name: "--name",
@@ -121,6 +128,13 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--encryption-configuration",
+          description: "Settings to configure server-side encryption",
+          args: {
+            name: "structure",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -359,6 +373,14 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--included-data",
+          description:
+            "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response without the encrypted definition",
+          args: {
+            name: "string",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -423,6 +445,14 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--included-data",
+          description:
+            "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition. When calling a labelled ARN for an encrypted state machine, the includedData = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire state machine definition to get the Distributed Map state\u2019s definition. In this case, the API caller needs to have kms:Decrypt permission",
+          args: {
+            name: "string",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -487,6 +517,14 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--included-data",
+          description:
+            "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition",
+          args: {
+            name: "string",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -1147,7 +1185,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "send-task-failure",
       description:
-        "Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed",
+        "Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed. 
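
// Editor's note: a hedged sketch of creating a state machine with the
// customer-managed-key encryption described above, via @aws-sdk/client-sfn.
// The EncryptionConfiguration field names follow the Step Functions API as I
// understand it; the ARNs and the definition are placeholders.
import { SFNClient, CreateStateMachineCommand } from "@aws-sdk/client-sfn";

const sfn = new SFNClient({});

await sfn.send(
  new CreateStateMachineCommand({
    name: "example-machine", // placeholder
    roleArn: "arn:aws:iam::111122223333:role/example-role", // placeholder
    definition: JSON.stringify({
      StartAt: "Done",
      States: { Done: { Type: "Succeed" } },
    }),
    encryptionConfiguration: {
      type: "CUSTOMER_MANAGED_KMS_KEY",
      kmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE", // placeholder symmetric key
      kmsDataKeyReusePeriodSeconds: 300, // how long data keys are reused before regenerating
    },
  })
);
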
For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role. A caller can mark a task as failed without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted",
       options: [
         {
           name: "--task-token",
@@ -1355,6 +1393,14 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--included-data",
+          description:
+            "If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition",
+          args: {
+            name: "string",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -1377,7 +1423,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "stop-execution",
       description:
-        "Stops an execution. This API action is not supported by EXPRESS state machines",
+        "Stops an execution. This API action is not supported by EXPRESS state machines. For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role. A caller can stop an execution without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted",
       options: [
         {
           name: "--execution-arn",
@@ -1623,7 +1669,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "update-state-machine",
       description:
-        "Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The following are some examples of qualified and unqualified state machine ARNs: The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine. arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException. The following qualified state machine ARN refers to an alias named PROD. arn::states:::stateMachine: If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias. The following unqualified state machine ARN refers to a state machine named myStateMachine. arn::states:::stateMachine: After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine. Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1. All StartExecution calls within a few seconds use the updated definition and roleArn. 
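
// Editor's note: a sketch of the includedData behavior noted above -- callers
// without kms:Decrypt can still read metadata for an encrypted state machine by
// asking for METADATA_ONLY. The ARN is a placeholder.
import { SFNClient, DescribeStateMachineCommand } from "@aws-sdk/client-sfn";

const sfn = new SFNClient({});

const info = await sfn.send(
  new DescribeStateMachineCommand({
    stateMachineArn:
      "arn:aws:states:us-east-1:111122223333:stateMachine:example", // placeholder
    includedData: "METADATA_ONLY", // skip the (possibly encrypted) definition entirely
  })
);
console.log(info.name, info.status);
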
Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn", + "Updates an existing state machine by modifying its definition, roleArn, loggingConfiguration, or EncryptionConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The following are some examples of qualified and unqualified state machine ARNs: The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine. arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException. The following qualified state machine ARN refers to an alias named PROD. arn::states:::stateMachine: If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias. The following unqualified state machine ARN refers to a state machine named myStateMachine. arn::states:::stateMachine: After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine. Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1. All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn", options: [ { name: "--state-machine-arn", @@ -1681,6 +1727,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-configuration", + description: "Settings to configure server-side encryption", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/tnb.ts b/src/aws/tnb.ts index b7260006db71..ad3ba96cea6a 100644 --- a/src/aws/tnb.ts +++ b/src/aws/tnb.ts @@ -248,7 +248,7 @@ const completionSpec: Fig.Spec = { { name: "get-sol-function-instance", description: - "Gets the details of a network function instance, including the instantation state and metadata from the function package descriptor in the network function package. A network function instance is a function in a function package", + "Gets the details of a network function instance, including the instantiation state and metadata from the function package descriptor in the network function package. A network function instance is a function in a function package", options: [ { name: "--vnf-instance-id", @@ -543,7 +543,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. 
When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs",
+        "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs",
       args: {
         name: "map",
       },
@@ -776,6 +776,14 @@ const completionSpec: Fig.Spec = {
             name: "string",
           },
         },
+        {
+          name: "--ns-instance-id",
+          description:
+            "Network instance ID filter, to retrieve network operations associated with a network instance",
+          args: {
+            name: "string",
+          },
+        },
         {
           name: "--cli-input-json",
           description:
@@ -1056,7 +1064,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "--tags",
       description:
-        "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs",
+        "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs",
       args: {
         name: "map",
       },
@@ -1159,12 +1167,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "update-sol-network-instance",
       description:
-        "Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed",
+        "Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed. Choose the updateType parameter to target the necessary update of the network instance",
       options: [
         {
           name: "--modify-vnf-info-data",
           description:
-            "Identifies the network function information parameters and/or the configurable properties of the network function to be modified",
+            "Identifies the network function information parameters and/or the configurable properties of the network function to be modified. Include this property only if the update type is MODIFY_VNF_INFORMATION",
           args: {
             name: "structure",
           },
@@ -1179,14 +1187,23 @@ const completionSpec: Fig.Spec = {
         {
           name: "--tags",
           description:
-            "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs",
+            "A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. 
Use tags to search and filter your resources or track your Amazon Web Services costs",
           args: {
             name: "map",
           },
         },
+        {
+          name: "--update-ns",
+          description:
+            "Identifies the network service descriptor and the configurable properties of the descriptor to be used for the update. Include this property only if the update type is UPDATE_NS",
+          args: {
+            name: "structure",
+          },
+        },
         {
           name: "--update-type",
-          description: "The type of update",
+          description:
+            "The type of update. Use the MODIFY_VNF_INFORMATION update type to update a specific network function configuration in the network instance. Use the UPDATE_NS update type to update the network instance to a new network service descriptor",
           args: {
             name: "string",
           },
diff --git a/src/aws/workspaces.ts b/src/aws/workspaces.ts
index 72956801b6a8..a36c20e795a0 100644
--- a/src/aws/workspaces.ts
+++ b/src/aws/workspaces.ts
@@ -692,7 +692,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "create-workspaces",
       description:
-        "Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core",
+        "Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?",
     options: [
       {
         name: "--workspaces",
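
// Editor's note: a hedged sketch of the two updateType paths described above,
// using @aws-sdk/client-tnb. The payload shape for updateNs is an assumption
// based on the option descriptions; all IDs are placeholders.
import { TnbClient, UpdateSolNetworkInstanceCommand } from "@aws-sdk/client-tnb";

const tnb = new TnbClient({});

// Move the network instance to a new network service descriptor.
await tnb.send(
  new UpdateSolNetworkInstanceCommand({
    nsInstanceId: "ni-0123456789abcdef0", // placeholder
    updateType: "UPDATE_NS",
    updateNs: { nsdInfoId: "np-0123456789abcdef0" }, // assumed field name
    tags: { team: "5g-core" }, // applied to the network operation, not the instance
  })
);
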