From 254f8de6610fb4306739df38b6bf34773743f7b5 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Mon, 28 Oct 2024 18:13:05 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/mediapackagev2/2022-12-25/api-2.json | 451 ++++++++++++- apis/mediapackagev2/2022-12-25/docs-2.json | 220 +++++++ .../mediapackagev2/2022-12-25/examples-1.json | 467 ++++++++++++++ .../2022-12-25/paginators-1.json | 6 + apis/mediapackagev2/2022-12-25/waiters-2.json | 37 +- apis/opensearch/2021-01-01/api-2.json | 26 +- apis/opensearch/2021-01-01/docs-2.json | 27 + apis/rds/2014-10-31/api-2.json | 10 + apis/rds/2014-10-31/docs-2.json | 18 +- apis/storagegateway/2013-06-30/docs-2.json | 28 +- gems/aws-partitions/CHANGELOG.md | 5 + gems/aws-partitions/VERSION | 2 +- gems/aws-partitions/partitions.json | 1 + gems/aws-sdk-mediapackagev2/CHANGELOG.md | 5 + gems/aws-sdk-mediapackagev2/VERSION | 2 +- .../aws-sdk-mediapackagev2.gemspec | 2 +- .../lib/aws-sdk-mediapackagev2.rb | 4 +- .../lib/aws-sdk-mediapackagev2/client.rb | 607 +++++++++++++++++- .../lib/aws-sdk-mediapackagev2/client_api.rb | 210 ++++++ .../lib/aws-sdk-mediapackagev2/types.rb | 591 +++++++++++++++++ .../lib/aws-sdk-mediapackagev2/waiters.rb | 123 ++++ gems/aws-sdk-mediapackagev2/sig/client.rbs | 122 ++++ gems/aws-sdk-mediapackagev2/sig/types.rbs | 149 ++++- gems/aws-sdk-mediapackagev2/sig/waiters.rbs | 13 + gems/aws-sdk-opensearchservice/CHANGELOG.md | 5 + gems/aws-sdk-opensearchservice/VERSION | 2 +- .../lib/aws-sdk-opensearchservice.rb | 2 +- .../lib/aws-sdk-opensearchservice/client.rb | 57 +- .../aws-sdk-opensearchservice/client_api.rb | 16 + .../lib/aws-sdk-opensearchservice/types.rb | 51 +- gems/aws-sdk-opensearchservice/sig/client.rbs | 24 +- gems/aws-sdk-opensearchservice/sig/types.rbs | 14 + gems/aws-sdk-rds/CHANGELOG.md | 5 + gems/aws-sdk-rds/VERSION | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds.rb | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb | 150 ++++- .../aws-sdk-rds/lib/aws-sdk-rds/client_api.rb | 10 + .../aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb | 64 ++ .../lib/aws-sdk-rds/db_cluster_snapshot.rb | 53 ++ gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb | 5 + gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb | 180 +++++- gems/aws-sdk-rds/sig/client.rbs | 10 + gems/aws-sdk-rds/sig/db_cluster.rbs | 5 + gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs | 5 + gems/aws-sdk-rds/sig/types.rbs | 10 + gems/aws-sdk-storagegateway/CHANGELOG.md | 5 + gems/aws-sdk-storagegateway/VERSION | 2 +- .../lib/aws-sdk-storagegateway.rb | 2 +- .../lib/aws-sdk-storagegateway/client.rb | 29 +- .../lib/aws-sdk-storagegateway/types.rb | 59 +- 50 files changed, 3838 insertions(+), 57 deletions(-) diff --git a/apis/mediapackagev2/2022-12-25/api-2.json b/apis/mediapackagev2/2022-12-25/api-2.json index a8ca5da4e85..a4db2b1eaad 100644 --- a/apis/mediapackagev2/2022-12-25/api-2.json +++ b/apis/mediapackagev2/2022-12-25/api-2.json @@ -2,6 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-12-25", + "auth":["aws.auth#sigv4"], "endpointPrefix":"mediapackagev2", "protocol":"rest-json", "protocols":["rest-json"], @@ -13,6 +14,25 @@ "uid":"mediapackagev2-2022-12-25" }, "operations":{ + "CancelHarvestJob":{ + "name":"CancelHarvestJob", + "http":{ + "method":"PUT", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}", + "responseCode":200 + }, + "input":{"shape":"CancelHarvestJobRequest"}, + "output":{"shape":"CancelHarvestJobResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + 
{"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "CreateChannel":{ "name":"CreateChannel", "http":{ @@ -53,6 +73,26 @@ ], "idempotent":true }, + "CreateHarvestJob":{ + "name":"CreateHarvestJob", + "http":{ + "method":"POST", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob", + "responseCode":200 + }, + "input":{"shape":"CreateHarvestJobRequest"}, + "output":{"shape":"CreateHarvestJobResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "idempotent":true + }, "CreateOriginEndpoint":{ "name":"CreateOriginEndpoint", "http":{ @@ -213,6 +253,23 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetHarvestJob":{ + "name":"GetHarvestJob", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}", + "responseCode":200 + }, + "input":{"shape":"GetHarvestJobRequest"}, + "output":{"shape":"GetHarvestJobResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "GetOriginEndpoint":{ "name":"GetOriginEndpoint", "http":{ @@ -280,6 +337,23 @@ {"shape":"ResourceNotFoundException"} ] }, + "ListHarvestJobs":{ + "name":"ListHarvestJobs", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/harvestJob", + "responseCode":200 + }, + "input":{"shape":"ListHarvestJobsRequest"}, + "output":{"shape":"ListHarvestJobsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ListOriginEndpoints":{ "name":"ListOriginEndpoints", "http":{ @@ -459,6 +533,47 @@ "type":"boolean", "box":true }, + "CancelHarvestJobRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "HarvestJobName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"OriginEndpointName" + }, + "HarvestJobName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"HarvestJobName" + }, + "ETag":{ + "shape":"EntityTag", + "location":"header", + "locationName":"x-amzn-update-if-match" + } + } + }, + "CancelHarvestJobResponse":{ + "type":"structure", + "members":{ + } + }, "ChannelGroupListConfiguration":{ "type":"structure", "required":[ @@ -668,6 +783,79 @@ "type":"list", "member":{"shape":"CreateDashManifestConfiguration"} }, + "CreateHarvestJobRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "HarvestedManifests", + "ScheduleConfiguration", + "Destination" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "location":"uri", + 
"locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"OriginEndpointName" + }, + "Description":{"shape":"ResourceDescription"}, + "HarvestedManifests":{"shape":"HarvestedManifests"}, + "ScheduleConfiguration":{"shape":"HarvesterScheduleConfiguration"}, + "Destination":{"shape":"Destination"}, + "ClientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amzn-client-token" + }, + "HarvestJobName":{"shape":"ResourceName"}, + "Tags":{"shape":"TagMap"} + } + }, + "CreateHarvestJobResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "Destination", + "HarvestJobName", + "HarvestedManifests", + "ScheduleConfiguration", + "Arn", + "CreatedAt", + "ModifiedAt", + "Status" + ], + "members":{ + "ChannelGroupName":{"shape":"ResourceName"}, + "ChannelName":{"shape":"ResourceName"}, + "OriginEndpointName":{"shape":"ResourceName"}, + "Destination":{"shape":"Destination"}, + "HarvestJobName":{"shape":"ResourceName"}, + "HarvestedManifests":{"shape":"HarvestedManifests"}, + "Description":{"shape":"ResourceDescription"}, + "ScheduleConfiguration":{"shape":"HarvesterScheduleConfiguration"}, + "Arn":{"shape":"String"}, + "CreatedAt":{"shape":"Timestamp"}, + "ModifiedAt":{"shape":"Timestamp"}, + "Status":{"shape":"HarvestJobStatus"}, + "ErrorMessage":{"shape":"String"}, + "ETag":{"shape":"EntityTag"}, + "Tags":{"shape":"TagMap"} + } + }, "CreateHlsManifestConfiguration":{ "type":"structure", "required":["ManifestName"], @@ -970,6 +1158,13 @@ "members":{ } }, + "Destination":{ + "type":"structure", + "required":["S3Destination"], + "members":{ + "S3Destination":{"shape":"S3DestinationConfig"} + } + }, "DrmSystem":{ "type":"string", "enum":[ @@ -1205,6 +1400,70 @@ "type":"list", "member":{"shape":"GetDashManifestConfiguration"} }, + "GetHarvestJobRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "HarvestJobName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"OriginEndpointName" + }, + "HarvestJobName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"HarvestJobName" + } + } + }, + "GetHarvestJobResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "Destination", + "HarvestJobName", + "HarvestedManifests", + "ScheduleConfiguration", + "Arn", + "CreatedAt", + "ModifiedAt", + "Status" + ], + "members":{ + "ChannelGroupName":{"shape":"ResourceName"}, + "ChannelName":{"shape":"ResourceName"}, + "OriginEndpointName":{"shape":"ResourceName"}, + "Destination":{"shape":"Destination"}, + "HarvestJobName":{"shape":"ResourceName"}, + "HarvestedManifests":{"shape":"HarvestedManifests"}, + "Description":{"shape":"ResourceDescription"}, + "ScheduleConfiguration":{"shape":"HarvesterScheduleConfiguration"}, + "Arn":{"shape":"String"}, + "CreatedAt":{"shape":"Timestamp"}, + "ModifiedAt":{"shape":"Timestamp"}, + "Status":{"shape":"HarvestJobStatus"}, + "ErrorMessage":{"shape":"String"}, + "ETag":{"shape":"EntityTag"}, + 
"Tags":{"shape":"TagMap"} + } + }, "GetHlsManifestConfiguration":{ "type":"structure", "required":[ @@ -1343,6 +1602,104 @@ "Tags":{"shape":"TagMap"} } }, + "HarvestJob":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "Destination", + "HarvestJobName", + "HarvestedManifests", + "ScheduleConfiguration", + "Arn", + "CreatedAt", + "ModifiedAt", + "Status" + ], + "members":{ + "ChannelGroupName":{"shape":"ResourceName"}, + "ChannelName":{"shape":"ResourceName"}, + "OriginEndpointName":{"shape":"ResourceName"}, + "Destination":{"shape":"Destination"}, + "HarvestJobName":{"shape":"ResourceName"}, + "HarvestedManifests":{"shape":"HarvestedManifests"}, + "Description":{"shape":"ResourceDescription"}, + "ScheduleConfiguration":{"shape":"HarvesterScheduleConfiguration"}, + "Arn":{"shape":"String"}, + "CreatedAt":{"shape":"Timestamp"}, + "ModifiedAt":{"shape":"Timestamp"}, + "Status":{"shape":"HarvestJobStatus"}, + "ErrorMessage":{"shape":"String"}, + "ETag":{"shape":"EntityTag"} + } + }, + "HarvestJobStatus":{ + "type":"string", + "enum":[ + "QUEUED", + "IN_PROGRESS", + "CANCELLED", + "COMPLETED", + "FAILED" + ] + }, + "HarvestJobsList":{ + "type":"list", + "member":{"shape":"HarvestJob"} + }, + "HarvestedDashManifest":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{"shape":"ResourceName"} + } + }, + "HarvestedDashManifestsList":{ + "type":"list", + "member":{"shape":"HarvestedDashManifest"} + }, + "HarvestedHlsManifest":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{"shape":"ResourceName"} + } + }, + "HarvestedHlsManifestsList":{ + "type":"list", + "member":{"shape":"HarvestedHlsManifest"} + }, + "HarvestedLowLatencyHlsManifest":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{"shape":"ResourceName"} + } + }, + "HarvestedLowLatencyHlsManifestsList":{ + "type":"list", + "member":{"shape":"HarvestedLowLatencyHlsManifest"} + }, + "HarvestedManifests":{ + "type":"structure", + "members":{ + "HlsManifests":{"shape":"HarvestedHlsManifestsList"}, + "DashManifests":{"shape":"HarvestedDashManifestsList"}, + "LowLatencyHlsManifests":{"shape":"HarvestedLowLatencyHlsManifestsList"} + } + }, + "HarvesterScheduleConfiguration":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"} + } + }, "IdempotencyToken":{ "type":"string", "max":256, @@ -1442,6 +1799,67 @@ "type":"list", "member":{"shape":"ListDashManifestConfiguration"} }, + "ListHarvestJobsRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ListHarvestJobsRequestChannelNameString", + "location":"querystring", + "locationName":"channelName" + }, + "OriginEndpointName":{ + "shape":"ListHarvestJobsRequestOriginEndpointNameString", + "location":"querystring", + "locationName":"originEndpointName" + }, + "Status":{ + "shape":"HarvestJobStatus", + "location":"querystring", + "locationName":"includeStatus" + }, + "MaxResults":{ + "shape":"ListHarvestJobsRequestMaxResultsInteger", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListHarvestJobsRequestChannelNameString":{ + "type":"string", + "max":256, + 
"min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ListHarvestJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListHarvestJobsRequestOriginEndpointNameString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ListHarvestJobsResponse":{ + "type":"structure", + "members":{ + "Items":{"shape":"HarvestJobsList"}, + "NextToken":{"shape":"String"} + } + }, "ListHlsManifestConfiguration":{ "type":"structure", "required":["ManifestName"], @@ -1680,9 +2098,32 @@ "enum":[ "CHANNEL_GROUP", "CHANNEL", - "ORIGIN_ENDPOINT" + "ORIGIN_ENDPOINT", + "HARVEST_JOB" ] }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3 + }, + "S3DestinationConfig":{ + "type":"structure", + "required":[ + "BucketName", + "DestinationPath" + ], + "members":{ + "BucketName":{"shape":"S3BucketName"}, + "DestinationPath":{"shape":"S3DestinationPath"} + } + }, + "S3DestinationPath":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\S]+" + }, "Scte":{ "type":"structure", "members":{ @@ -2099,6 +2540,14 @@ "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS", "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION", "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY", + "HARVESTED_MANIFEST_HAS_START_END_FILTER_CONFIGURATION", + "HARVESTED_MANIFEST_NOT_FOUND_ON_ENDPOINT", + "TOO_MANY_IN_PROGRESS_HARVEST_JOBS", + "HARVEST_JOB_INELIGIBLE_FOR_CANCELLATION", + "INVALID_HARVEST_JOB_DURATION", + "HARVEST_JOB_S3_DESTINATION_MISSING_OR_INCOMPLETE", + "HARVEST_JOB_UNABLE_TO_WRITE_TO_S3_DESTINATION", + "HARVEST_JOB_CUSTOMER_ENDPOINT_READ_ACCESS_DENIED", "CLIP_START_TIME_WITH_START_OR_END", "START_TAG_TIME_OFFSET_INVALID" ] diff --git a/apis/mediapackagev2/2022-12-25/docs-2.json b/apis/mediapackagev2/2022-12-25/docs-2.json index c54816fb9fc..748a16fb3d8 100644 --- a/apis/mediapackagev2/2022-12-25/docs-2.json +++ b/apis/mediapackagev2/2022-12-25/docs-2.json @@ -2,8 +2,10 @@ "version": "2.0", "service": "

This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2.

The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources.

If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference.

This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols.

We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide.

", "operations": { + "CancelHarvestJob": "

Cancels an in-progress harvest job.

", "CreateChannel": "

Create a channel to start receiving content streams. The channel represents the input to MediaPackage for incoming live content from an encoder such as AWS Elemental MediaLive. The channel receives content, and after packaging it, outputs it through an origin endpoint to downstream devices (such as video players or CDNs) that request the content. You can create only one channel with each request. We recommend that you spread out channels between channel groups, such as putting redundant channels in the same AWS Region in different channel groups.

", "CreateChannelGroup": "

Create a channel group to group your channels and origin endpoints. A channel group is the top-level resource that consists of channels and origin endpoints that are associated with it and that provides predictable URLs for stream delivery. All channels and origin endpoints within the channel group are guaranteed to share the DNS. You can create only one channel group with each request.

", + "CreateHarvestJob": "

Creates a new harvest job to export content from a MediaPackage v2 channel to an S3 bucket.

", "CreateOriginEndpoint": "

The endpoint is attached to a channel, and represents the output of the live content. You can associate multiple endpoints to a single channel. Each endpoint gives players and downstream CDNs (such as Amazon CloudFront) access to the content for playback. Content can't be served from a channel until it has an endpoint. You can create only one endpoint with each request.

", "DeleteChannel": "

Delete a channel to stop AWS Elemental MediaPackage from receiving further content. You must delete the channel's origin endpoints before you can delete the channel.

", "DeleteChannelGroup": "

Delete a channel group. You must delete the channel group's channels and origin endpoints before you can delete the channel group. If you delete a channel group, you'll lose access to the egress domain and will have to create a new channel group to replace it.

", @@ -13,10 +15,12 @@ "GetChannel": "

Retrieves the specified channel that's configured in AWS Elemental MediaPackage, including the origin endpoints that are associated with it.

", "GetChannelGroup": "

Retrieves the specified channel group that's configured in AWS Elemental MediaPackage, including the channels and origin endpoints that are associated with it.

", "GetChannelPolicy": "

Retrieves the specified channel policy that's configured in AWS Elemental MediaPackage. With policies, you can specify who has access to AWS resources and what actions they can perform on those resources.

", + "GetHarvestJob": "

Retrieves the details of a specific harvest job.

", "GetOriginEndpoint": "

Retrieves the specified origin endpoint that's configured in AWS Elemental MediaPackage to obtain its playback URL and to view the packaging settings that it's currently using.

", "GetOriginEndpointPolicy": "

Retrieves the specified origin endpoint policy that's configured in AWS Elemental MediaPackage.

", "ListChannelGroups": "

Retrieves all channel groups that are configured in AWS Elemental MediaPackage, including the channels and origin endpoints that are associated with them.

", "ListChannels": "

Retrieves all channels in a specific channel group that are configured in AWS Elemental MediaPackage, including the origin endpoints that are associated with them.

", + "ListHarvestJobs": "

Retrieves a list of harvest jobs that match the specified criteria.

", "ListOriginEndpoints": "

Retrieves all origin endpoints in a specific channel that are configured in AWS Elemental MediaPackage.

", "ListTagsForResource": "

Lists the tags assigned to a resource.

", "PutChannelPolicy": "

Attaches an IAM policy to the specified channel. With policies, you can specify who has access to AWS resources and what actions they can perform on those resources. You can attach only one policy with each request.

", @@ -54,6 +58,16 @@ "StartTag$Precise": "

Specify the value for PRECISE within your EXT-X-START tag. Leave blank, or choose false, to use the default value NO. Choose true to use the value YES.

" } }, + "CancelHarvestJobRequest": { + "base": null, + "refs": { + } + }, + "CancelHarvestJobResponse": { + "base": null, + "refs": { + } + }, "ChannelGroupListConfiguration": { "base": "

The configuration of the channel group.

", "refs": { @@ -163,6 +177,16 @@ "UpdateOriginEndpointRequest$DashManifests": "

A DASH manifest configuration.

" } }, + "CreateHarvestJobRequest": { + "base": "

The request object for creating a new harvest job.

", + "refs": { + } + }, + "CreateHarvestJobResponse": { + "base": "

The response object returned after creating a harvest job.

", + "refs": { + } + }, "CreateHlsManifestConfiguration": { "base": "

Create an HTTP live streaming (HLS) manifest configuration.

", "refs": { @@ -325,6 +349,15 @@ "refs": { } }, + "Destination": { + "base": "

The configuration for the destination where the harvested content will be exported.

", + "refs": { + "CreateHarvestJobRequest$Destination": "

The S3 destination where the harvested content will be placed.

", + "CreateHarvestJobResponse$Destination": "

The S3 destination where the harvested content will be placed.

", + "GetHarvestJobResponse$Destination": "

The S3 destination where the harvested content is being placed.

", + "HarvestJob$Destination": "

The S3 destination where the harvested content will be placed.

" + } + }, "DrmSystem": { "base": null, "refs": { @@ -376,12 +409,16 @@ "EntityTag": { "base": null, "refs": { + "CancelHarvestJobRequest$ETag": "

The current Entity Tag (ETag) associated with the harvest job. Used for concurrency control.

", "CreateChannelGroupResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", "CreateChannelResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", + "CreateHarvestJobResponse$ETag": "

The current version of the harvest job. Used for concurrency control.

", "CreateOriginEndpointResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", "GetChannelGroupResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", "GetChannelResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", + "GetHarvestJobResponse$ETag": "

The current version of the harvest job. Used for concurrency control.

", "GetOriginEndpointResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", + "HarvestJob$ETag": "

The current version of the harvest job. Used for concurrency control.

", "UpdateChannelGroupRequest$ETag": "

The expected current Entity Tag (ETag) for the resource. If the specified ETag does not match the resource's current entity tag, the update request will be rejected.

", "UpdateChannelGroupResponse$ETag": "

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

", "UpdateChannelRequest$ETag": "

The expected current Entity Tag (ETag) for the resource. If the specified ETag does not match the resource's current entity tag, the update request will be rejected.

", @@ -474,6 +511,16 @@ "UpdateOriginEndpointResponse$DashManifests": "

A DASH manifest configuration.

" } }, + "GetHarvestJobRequest": { + "base": "

The request object for retrieving a specific harvest job.

", + "refs": { + } + }, + "GetHarvestJobResponse": { + "base": "

The response object containing the details of the requested harvest job.

", + "refs": { + } + }, "GetHlsManifestConfiguration": { "base": "

Retrieve the HTTP live streaming (HLS) manifest configuration.

", "refs": { @@ -522,11 +569,87 @@ "refs": { } }, + "HarvestJob": { + "base": "

Represents a harvest job resource in MediaPackage v2, which is used to export content from an origin endpoint to an S3 bucket.

", + "refs": { + "HarvestJobsList$member": null + } + }, + "HarvestJobStatus": { + "base": null, + "refs": { + "CreateHarvestJobResponse$Status": "

The current status of the harvest job (e.g., QUEUED, IN_PROGRESS, CANCELLED, COMPLETED, FAILED).

", + "GetHarvestJobResponse$Status": "

The current status of the harvest job (e.g., QUEUED, IN_PROGRESS, CANCELLED, COMPLETED, FAILED).

", + "HarvestJob$Status": "

The current status of the harvest job (e.g., QUEUED, IN_PROGRESS, CANCELLED, COMPLETED, FAILED).

", + "ListHarvestJobsRequest$Status": "

The status to filter the harvest jobs by. If specified, only harvest jobs with this status will be returned.

" + } + }, + "HarvestJobsList": { + "base": null, + "refs": { + "ListHarvestJobsResponse$Items": "

An array of harvest job objects that match the specified criteria.

" + } + }, + "HarvestedDashManifest": { + "base": "

Information about a harvested DASH manifest.

", + "refs": { + "HarvestedDashManifestsList$member": null + } + }, + "HarvestedDashManifestsList": { + "base": null, + "refs": { + "HarvestedManifests$DashManifests": "

A list of harvested DASH manifests.

" + } + }, + "HarvestedHlsManifest": { + "base": "

Information about a harvested HLS manifest.

", + "refs": { + "HarvestedHlsManifestsList$member": null + } + }, + "HarvestedHlsManifestsList": { + "base": null, + "refs": { + "HarvestedManifests$HlsManifests": "

A list of harvested HLS manifests.

" + } + }, + "HarvestedLowLatencyHlsManifest": { + "base": "

Information about a harvested Low-Latency HLS manifest.

", + "refs": { + "HarvestedLowLatencyHlsManifestsList$member": null + } + }, + "HarvestedLowLatencyHlsManifestsList": { + "base": null, + "refs": { + "HarvestedManifests$LowLatencyHlsManifests": "

A list of harvested Low-Latency HLS manifests.

" + } + }, + "HarvestedManifests": { + "base": "

A collection of harvested manifests of different types.

", + "refs": { + "CreateHarvestJobRequest$HarvestedManifests": "

A list of manifests to be harvested.

", + "CreateHarvestJobResponse$HarvestedManifests": "

A list of manifests that will be harvested.

", + "GetHarvestJobResponse$HarvestedManifests": "

A list of manifests that are being or have been harvested.

", + "HarvestJob$HarvestedManifests": "

A list of manifests that are being or have been harvested.

" + } + }, + "HarvesterScheduleConfiguration": { + "base": "

Defines the schedule configuration for a harvest job.

", + "refs": { + "CreateHarvestJobRequest$ScheduleConfiguration": "

The configuration for when the harvest job should run, including start and end times.

", + "CreateHarvestJobResponse$ScheduleConfiguration": "

The configuration for when the harvest job will run, including start and end times.

", + "GetHarvestJobResponse$ScheduleConfiguration": "

The configuration for when the harvest job is scheduled to run, including start and end times.

", + "HarvestJob$ScheduleConfiguration": "

The configuration for when the harvest job is scheduled to run.

" + } + }, "IdempotencyToken": { "base": null, "refs": { "CreateChannelGroupRequest$ClientToken": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", "CreateChannelRequest$ClientToken": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "CreateHarvestJobRequest$ClientToken": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "CreateOriginEndpointRequest$ClientToken": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

" } }, @@ -607,6 +730,34 @@ "OriginEndpointListConfiguration$DashManifests": "

A DASH manifest configuration.

" } }, + "ListHarvestJobsRequest": { + "base": "

The request object for listing harvest jobs.

", + "refs": { + } + }, + "ListHarvestJobsRequestChannelNameString": { + "base": null, + "refs": { + "ListHarvestJobsRequest$ChannelName": "

The name of the channel to filter the harvest jobs by. If specified, only harvest jobs associated with this channel will be returned.

" + } + }, + "ListHarvestJobsRequestMaxResultsInteger": { + "base": null, + "refs": { + "ListHarvestJobsRequest$MaxResults": "

The maximum number of harvest jobs to return in a single request. If not specified, a default value will be used.

" + } + }, + "ListHarvestJobsRequestOriginEndpointNameString": { + "base": null, + "refs": { + "ListHarvestJobsRequest$OriginEndpointName": "

The name of the origin endpoint to filter the harvest jobs by. If specified, only harvest jobs associated with this origin endpoint will be returned.

" + } + }, + "ListHarvestJobsResponse": { + "base": "

The response object containing the list of harvest jobs that match the specified criteria.

", + "refs": { + } + }, "ListHlsManifestConfiguration": { "base": "

List the HTTP live streaming (HLS) manifest configuration.

", "refs": { @@ -731,11 +882,15 @@ "CreateChannelGroupResponse$Description": "

The description for your channel group.

", "CreateChannelRequest$Description": "

Enter any descriptive text that helps you to identify the channel.

", "CreateChannelResponse$Description": "

The description for your channel.

", + "CreateHarvestJobRequest$Description": "

An optional description for the harvest job.

", + "CreateHarvestJobResponse$Description": "

The description of the harvest job, if provided.

", "CreateOriginEndpointRequest$Description": "

Enter any descriptive text that helps you to identify the origin endpoint.

", "CreateOriginEndpointResponse$Description": "

The description for your origin endpoint.

", "GetChannelGroupResponse$Description": "

The description for your channel group.

", "GetChannelResponse$Description": "

The description for your channel.

", + "GetHarvestJobResponse$Description": "

The description of the harvest job, if provided.

", "GetOriginEndpointResponse$Description": "

The description for your origin endpoint.

", + "HarvestJob$Description": "

An optional description of the harvest job.

", "OriginEndpointListConfiguration$Description": "

Any descriptive information that you want to add to the origin endpoint for future identification purposes.

", "UpdateChannelGroupRequest$Description": "

Any descriptive information that you want to add to the channel group for future identification purposes.

", "UpdateChannelGroupResponse$Description": "

The description for your channel group.

", @@ -748,9 +903,21 @@ "ResourceName": { "base": null, "refs": { + "CancelHarvestJobRequest$ChannelGroupName": "

The name of the channel group containing the channel that the harvest job is harvesting from.

", + "CancelHarvestJobRequest$ChannelName": "

The name of the channel that the harvest job is harvesting from.

", + "CancelHarvestJobRequest$OriginEndpointName": "

The name of the origin endpoint that the harvest job is harvesting from. This cannot be changed after the harvest job is submitted.

", + "CancelHarvestJobRequest$HarvestJobName": "

The name of the harvest job to cancel. This name must be unique within the channel and cannot be changed after the harvest job is submitted.

", "CreateChannelGroupRequest$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region. You can't use spaces in the name. You can't change the name after you create the channel group.

", "CreateChannelRequest$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "CreateChannelRequest$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group. You can't change the name after you create the channel.

", + "CreateHarvestJobRequest$ChannelGroupName": "

The name of the channel group containing the channel from which to harvest content.

", + "CreateHarvestJobRequest$ChannelName": "

The name of the channel from which to harvest content.

", + "CreateHarvestJobRequest$OriginEndpointName": "

The name of the origin endpoint from which to harvest content.

", + "CreateHarvestJobRequest$HarvestJobName": "

A name for the harvest job. This name must be unique within the channel.

", + "CreateHarvestJobResponse$ChannelGroupName": "

The name of the channel group containing the channel from which content is being harvested.

", + "CreateHarvestJobResponse$ChannelName": "

The name of the channel from which content is being harvested.

", + "CreateHarvestJobResponse$OriginEndpointName": "

The name of the origin endpoint from which content is being harvested.

", + "CreateHarvestJobResponse$HarvestJobName": "

The name of the created harvest job.

", "CreateOriginEndpointRequest$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "CreateOriginEndpointRequest$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

", "CreateOriginEndpointRequest$OriginEndpointName": "

The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel. You can't use spaces in the name. You can't change the name after you create the endpoint.

", @@ -774,6 +941,14 @@ "GetChannelRequest$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "GetChannelRequest$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

", "GetDashManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index.

", + "GetHarvestJobRequest$ChannelGroupName": "

The name of the channel group containing the channel associated with the harvest job.

", + "GetHarvestJobRequest$ChannelName": "

The name of the channel associated with the harvest job.

", + "GetHarvestJobRequest$OriginEndpointName": "

The name of the origin endpoint associated with the harvest job.

", + "GetHarvestJobRequest$HarvestJobName": "

The name of the harvest job to retrieve.

", + "GetHarvestJobResponse$ChannelGroupName": "

The name of the channel group containing the channel associated with the harvest job.

", + "GetHarvestJobResponse$ChannelName": "

The name of the channel associated with the harvest job.

", + "GetHarvestJobResponse$OriginEndpointName": "

The name of the origin endpoint associated with the harvest job.

", + "GetHarvestJobResponse$HarvestJobName": "

The name of the harvest job.

", "GetHlsManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", "GetHlsManifestConfiguration$ChildManifestName": "

A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", "GetLowLatencyHlsManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", @@ -790,8 +965,16 @@ "GetOriginEndpointResponse$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "GetOriginEndpointResponse$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

", "GetOriginEndpointResponse$OriginEndpointName": "

The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

", + "HarvestJob$ChannelGroupName": "

The name of the channel group containing the channel associated with this harvest job.

", + "HarvestJob$ChannelName": "

The name of the channel associated with this harvest job.

", + "HarvestJob$OriginEndpointName": "

The name of the origin endpoint associated with this harvest job.

", + "HarvestJob$HarvestJobName": "

The name of the harvest job.

", + "HarvestedDashManifest$ManifestName": "

The name of the harvested DASH manifest.

", + "HarvestedHlsManifest$ManifestName": "

The name of the harvested HLS manifest.

", + "HarvestedLowLatencyHlsManifest$ManifestName": "

The name of the harvested Low-Latency HLS manifest.

", "ListChannelsRequest$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "ListDashManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index.

", + "ListHarvestJobsRequest$ChannelGroupName": "

The name of the channel group whose harvest jobs you want to list. Only harvest jobs associated with channels in this group are returned.

", "ListHlsManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", "ListHlsManifestConfiguration$ChildManifestName": "

A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", "ListLowLatencyHlsManifestConfiguration$ManifestName": "

A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

", @@ -828,6 +1011,24 @@ "ResourceNotFoundException$ResourceTypeNotFound": "

The specified resource type wasn't found.

" } }, + "S3BucketName": { + "base": null, + "refs": { + "S3DestinationConfig$BucketName": "

The name of an S3 bucket within which harvested content will be exported.

" + } + }, + "S3DestinationConfig": { + "base": "

Configuration parameters for where in an S3 bucket to place the harvested content.

", + "refs": { + "Destination$S3Destination": "

The configuration for exporting harvested content to an S3 bucket. This includes details such as the bucket name and destination path within the bucket.

" + } + }, + "S3DestinationPath": { + "base": null, + "refs": { + "S3DestinationConfig$DestinationPath": "

The path within the specified S3 bucket where the harvested content will be placed.

" + } + }, "Scte": { "base": "

The SCTE configuration.

", "refs": { @@ -944,6 +1145,8 @@ "CreateChannelResponse$Arn": "

The Amazon Resource Name (ARN) associated with the resource.

", "CreateChannelResponse$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

", "CreateChannelResponse$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", + "CreateHarvestJobResponse$Arn": "

The Amazon Resource Name (ARN) of the created harvest job.

", + "CreateHarvestJobResponse$ErrorMessage": "

An error message if the harvest job creation failed.

", "CreateOriginEndpointResponse$Arn": "

The Amazon Resource Name (ARN) associated with the resource.

", "GetChannelGroupResponse$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "GetChannelGroupResponse$Arn": "

The Amazon Resource Name (ARN) associated with the resource.

", @@ -954,9 +1157,13 @@ "GetChannelResponse$ChannelName": "

The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

", "GetChannelResponse$ChannelGroupName": "

The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

", "GetDashManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", + "GetHarvestJobResponse$Arn": "

The Amazon Resource Name (ARN) of the harvest job.

", + "GetHarvestJobResponse$ErrorMessage": "

An error message if the harvest job encountered any issues.

", "GetHlsManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", "GetLowLatencyHlsManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", "GetOriginEndpointResponse$Arn": "

The Amazon Resource Name (ARN) associated with the resource.

", + "HarvestJob$Arn": "

The Amazon Resource Name (ARN) of the harvest job.

", + "HarvestJob$ErrorMessage": "

An error message if the harvest job encountered any issues.

", "IngestEndpoint$Id": "

The system-generated unique identifier for the IngestEndpoint.

", "IngestEndpoint$Url": "

The ingest domain URL where the source stream should be sent.

", "InternalServerException$Message": null, @@ -965,6 +1172,8 @@ "ListChannelsRequest$NextToken": "

The pagination token from the GET list request. Use the token to fetch the next page of results.

", "ListChannelsResponse$NextToken": "

The pagination token from the GET list request.

", "ListDashManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", + "ListHarvestJobsRequest$NextToken": "

A token used for pagination. Provide this value in subsequent requests to retrieve the next set of results.

", + "ListHarvestJobsResponse$NextToken": "

A token used for pagination. Include this value in subsequent requests to retrieve the next set of results. If null, there are no more results to retrieve.

", "ListHlsManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", "ListLowLatencyHlsManifestConfiguration$Url": "

The egress domain URL for stream delivery from MediaPackage.

", "ListOriginEndpointsRequest$NextToken": "

The pagination token from the GET list request. Use the token to fetch the next page of results.

", @@ -1011,10 +1220,13 @@ "CreateChannelGroupResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the channel group.

", "CreateChannelRequest$Tags": "

A comma-separated list of tag key:value pairs that you define. For example:

\"Key1\": \"Value1\",

\"Key2\": \"Value2\"

", "CreateChannelResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the channel.

", + "CreateHarvestJobRequest$Tags": "

A collection of tags associated with the harvest job.

", + "CreateHarvestJobResponse$Tags": "

A collection of tags associated with the harvest job.

", "CreateOriginEndpointRequest$Tags": "

A comma-separated list of tag key:value pairs that you define. For example:

\"Key1\": \"Value1\",

\"Key2\": \"Value2\"

", "CreateOriginEndpointResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the origin endpoint.

", "GetChannelGroupResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the channel group.

", "GetChannelResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the channel.

", + "GetHarvestJobResponse$Tags": "

A collection of tags associated with the harvest job.

", "GetOriginEndpointResponse$Tags": "

The comma-separated list of tag key:value pairs assigned to the origin endpoint.

", "ListTagsForResourceResponse$Tags": "

Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

", "TagResourceRequest$Tags": "

Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

", @@ -1050,6 +1262,8 @@ "CreateChannelGroupResponse$ModifiedAt": "

The date and time the channel group was modified.

", "CreateChannelResponse$CreatedAt": "

The date and time the channel was created.

", "CreateChannelResponse$ModifiedAt": "

The date and time the channel was modified.

", + "CreateHarvestJobResponse$CreatedAt": "

The date and time the harvest job was created.

", + "CreateHarvestJobResponse$ModifiedAt": "

The date and time the harvest job was last modified.

", "CreateOriginEndpointResponse$CreatedAt": "

The date and time the origin endpoint was created.

", "CreateOriginEndpointResponse$ModifiedAt": "

The date and time the origin endpoint was modified.

", "FilterConfiguration$Start": "

Optionally specify the start time for all of your manifest egress requests. When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.

", @@ -1059,8 +1273,14 @@ "GetChannelGroupResponse$ModifiedAt": "

The date and time the channel group was modified.

", "GetChannelResponse$CreatedAt": "

The date and time the channel was created.

", "GetChannelResponse$ModifiedAt": "

The date and time the channel was modified.

", + "GetHarvestJobResponse$CreatedAt": "

The date and time when the harvest job was created.

", + "GetHarvestJobResponse$ModifiedAt": "

The date and time when the harvest job was last modified.

", "GetOriginEndpointResponse$CreatedAt": "

The date and time the origin endpoint was created.

", "GetOriginEndpointResponse$ModifiedAt": "

The date and time the origin endpoint was modified.

", + "HarvestJob$CreatedAt": "

The date and time when the harvest job was created.

", + "HarvestJob$ModifiedAt": "

The date and time when the harvest job was last modified.

", + "HarvesterScheduleConfiguration$StartTime": "

The start time for the harvest job.

", + "HarvesterScheduleConfiguration$EndTime": "

The end time for the harvest job.

", "OriginEndpointListConfiguration$CreatedAt": "

The date and time the origin endpoint was created.

", "OriginEndpointListConfiguration$ModifiedAt": "

The date and time the origin endpoint was modified.

", "UpdateChannelGroupResponse$CreatedAt": "

The date and time the channel group was created.

", diff --git a/apis/mediapackagev2/2022-12-25/examples-1.json b/apis/mediapackagev2/2022-12-25/examples-1.json index d0878ded910..da41ac3be2d 100644 --- a/apis/mediapackagev2/2022-12-25/examples-1.json +++ b/apis/mediapackagev2/2022-12-25/examples-1.json @@ -1,6 +1,20 @@ { "version": "1.0", "examples": { + "CancelHarvestJob": [ + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "HarvestJobName": "HarvestJobName", + "OriginEndpointName": "exampleOriginEndpointName" + }, + "output": { + }, + "id": "example-1", + "title": "Cancel a Harvest Job" + } + ], "CreateChannel": [ { "input": { @@ -68,6 +82,88 @@ "title": "Creating a Channel Group" } ], + "CreateHarvestJob": [ + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + } + }, + "output": { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED", + "Tags": { + "key1": "value1", + "key2": "value2" + } + }, + "id": "example-1", + "title": "Creating a Harvest Job" + } + ], "CreateOriginEndpoint": [ { "input": { @@ -662,6 +758,61 @@ "title": "Getting a Channel Policy" } ], + "GetHarvestJob": [ + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "HarvestJobName": "HarvestJobName", + "OriginEndpointName": "exampleOriginEndpointName" + }, + "output": { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": 
"HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED", + "Tags": { + "key1": "value1", + "key2": "value2" + } + }, + "id": "example-1", + "title": "Getting a Harvest Job" + } + ], "GetOriginEndpoint": [ { "input": { @@ -843,6 +994,322 @@ "title": "Listing all Channels" } ], + "ListHarvestJobs": [ + { + "input": { + "ChannelGroupName": "exampleChannelGroup" + }, + "output": { + "Items": [ + { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED", + "Tags": { + "key1": "value1", + "key2": "value2" + } + }, + { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName2/originEndpoint/exampleOriginEndpointName2/harvestJob/HarvestJobName2", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName2", + "CreatedAt": "2024-05-28T15:30:00.00Z", + "Description": "Example HarvestJob2 description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName2", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T15:30:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName2", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T02:00:00.00Z" + }, + "Status": "IN_PROGRESS", + "Tags": { + "key1": "value1", + "key2": "value2" + } + } + ], + "NextToken": "someTokenValue" + }, + "id": "example-1", + "title": "ListHarvestJobs: Specify ChannelGroup only" + }, + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName" + }, + "output": { + "Items": [ + { + "Arn": 
"arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED", + "Tags": { + "key1": "value1", + "key2": "value2" + } + }, + { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName2/harvestJob/HarvestJobName2", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T15:30:00.00Z", + "Description": "Example HarvestJob2 description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "HarvestJobName": "HarvestJobName2", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T15:30:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName2", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T02:00:00.00Z" + }, + "Status": "IN_PROGRESS", + "Tags": { + "key1": "value1", + "key2": "value2" + } + } + ], + "NextToken": "someTokenValue" + }, + "id": "example-2", + "title": "ListHarvestJobs: Specify ChannelGroup, Channel only" + }, + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "OriginEndpointName": "exampleOriginEndpointName" + }, + "output": { + "Items": [ + { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": 
"2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED" + } + ], + "NextToken": "someTokenValue" + }, + "id": "example-3", + "title": "ListHarvestJobs: Specify ChannelGroup, Channel, OriginEndpoint" + }, + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "OriginEndpointName": "exampleOriginEndpointName", + "Status": "QUEUED" + }, + "output": { + "Items": [ + { + "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "CreatedAt": "2024-05-28T09:36:00.00Z", + "Description": "Example HarvestJob description", + "Destination": { + "S3Destination": { + "BucketName": "harvestJobS3DestinationBucket", + "DestinationPath": "manifests" + } + }, + "ETag": "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + "HarvestJobName": "HarvestJobName", + "HarvestedManifests": { + "DashManifests": [ + { + "ManifestName": "DashManifest" + } + ], + "HlsManifests": [ + { + "ManifestName": "HlsManifest" + } + ], + "LowLatencyHlsManifests": [ + { + "ManifestName": "LowLatencyHlsManifest" + } + ] + }, + "ModifiedAt": "2024-05-28T09:36:00.00Z", + "OriginEndpointName": "exampleOriginEndpointName", + "ScheduleConfiguration": { + "EndTime": "2024-05-28T12:00:00.00Z", + "StartTime": "2024-05-28T06:00:00.00Z" + }, + "Status": "QUEUED" + } + ], + "NextToken": "someTokenValue" + }, + "id": "example-4", + "title": "ListHarvestJobs: Specify ChannelGroup, Channel, OriginEndpoint + Status filter" + }, + { + "input": { + "ChannelGroupName": "exampleChannelGroup", + "ChannelName": "exampleChannelName", + "OriginEndpointName": "exampleOriginEndpointName" + }, + "output": { + }, + "id": "example-5", + "title": "ListHarvestJobs: Empty response" + } + ], "ListOriginEndpoints": [ { "input": { diff --git a/apis/mediapackagev2/2022-12-25/paginators-1.json b/apis/mediapackagev2/2022-12-25/paginators-1.json index 92079806f5d..6b1c29d8848 100644 --- a/apis/mediapackagev2/2022-12-25/paginators-1.json +++ b/apis/mediapackagev2/2022-12-25/paginators-1.json @@ -12,6 +12,12 @@ "limit_key": "MaxResults", "result_key": "Items" }, + "ListHarvestJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, "ListOriginEndpoints": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/apis/mediapackagev2/2022-12-25/waiters-2.json b/apis/mediapackagev2/2022-12-25/waiters-2.json index 13f60ee66be..138579fcedb 100644 --- a/apis/mediapackagev2/2022-12-25/waiters-2.json +++ b/apis/mediapackagev2/2022-12-25/waiters-2.json @@ -1,5 +1,36 @@ { - "version": 2, - "waiters": { + "version" : 2, + "waiters" : { + "HarvestJobFinished" : { + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetHarvestJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "Status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "Status", + "state" : "success", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "Status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "Status", + "state" : "retry", + "expected" : "QUEUED" + }, { + "matcher" : "path", + "argument" : "Status", + "state" : "retry", + "expected" : "IN_PROGRESS" + } ] + } } -} +} \ No newline at end of file diff --git a/apis/opensearch/2021-01-01/api-2.json 
b/apis/opensearch/2021-01-01/api-2.json index b1bae1abcc1..29a3f44e18a 100644 --- a/apis/opensearch/2021-01-01/api-2.json +++ b/apis/opensearch/2021-01-01/api-2.json @@ -1513,7 +1513,8 @@ "WarmType":{"shape":"OpenSearchWarmPartitionInstanceType"}, "WarmCount":{"shape":"IntegerClass"}, "ColdStorageOptions":{"shape":"ColdStorageOptions"}, - "MultiAZWithStandbyEnabled":{"shape":"Boolean"} + "MultiAZWithStandbyEnabled":{"shape":"Boolean"}, + "NodeOptions":{"shape":"NodeOptionsList"} } }, "ClusterConfigStatus":{ @@ -3420,11 +3421,34 @@ } }, "NextToken":{"type":"string"}, + "NodeConfig":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"Boolean"}, + "Type":{"shape":"OpenSearchPartitionInstanceType"}, + "Count":{"shape":"IntegerClass"} + } + }, "NodeId":{ "type":"string", "max":40, "min":10 }, + "NodeOption":{ + "type":"structure", + "members":{ + "NodeType":{"shape":"NodeOptionsNodeType"}, + "NodeConfig":{"shape":"NodeConfig"} + } + }, + "NodeOptionsList":{ + "type":"list", + "member":{"shape":"NodeOption"} + }, + "NodeOptionsNodeType":{ + "type":"string", + "enum":["coordinator"] + }, "NodeStatus":{ "type":"string", "enum":[ diff --git a/apis/opensearch/2021-01-01/docs-2.json b/apis/opensearch/2021-01-01/docs-2.json index 41f9e3dbba3..eeed24a6b0e 100644 --- a/apis/opensearch/2021-01-01/docs-2.json +++ b/apis/opensearch/2021-01-01/docs-2.json @@ -412,6 +412,7 @@ "JWTOptionsOutput$Enabled": "

True if JWT use is enabled.

", "ListInstanceTypeDetailsRequest$RetrieveAZs": "

An optional parameter that specifies the Availability Zones for the domain.

", "LogPublishingOption$Enabled": "

Whether the log should be published.

", + "NodeConfig$Enabled": "

A boolean that indicates whether a particular node type is enabled.

", "NodeToNodeEncryptionOptions$Enabled": "

True to enable node-to-node encryption.

", "OffPeakWindowOptions$Enabled": "

Whether to enable an off-peak window.

This option is only available when modifying a domain created prior to February 16, 2023, not when creating a new domain. All domains created after this date have the off-peak window enabled by default. You can't disable the off-peak window after it's enabled for a domain.

", "OptionStatus$PendingDeletion": "

Indicates whether the entity is being deleted.

", @@ -1574,6 +1575,7 @@ "EBSOptions$VolumeSize": "

Specifies the size (in GiB) of EBS volumes attached to data nodes.

", "EBSOptions$Iops": "

Specifies the baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the gp3 and provisioned IOPS EBS volume types.

", "EBSOptions$Throughput": "

Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type.

", + "NodeConfig$Count": "

The number of nodes of a particular node type in the cluster.

", "SAMLOptionsInput$SessionTimeoutMinutes": "

The duration, in minutes, after which a user session becomes inactive. Acceptable values are between 1 and 1440, and the default value is 60.

", "SAMLOptionsOutput$SessionTimeoutMinutes": "

The duration, in minutes, after which a user session becomes inactive.

", "SnapshotOptions$AutomatedSnapshotStartHour": "

The time, in UTC format, when OpenSearch Service takes a daily automated snapshot of the specified domain. Default is 0 hours.

", @@ -1972,6 +1974,12 @@ "ListVpcEndpointsResponse$NextToken": "

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, + "NodeConfig": { + "base": "

Container for specifying configuration of any node type.

", + "refs": { + "NodeOption$NodeConfig": "

Container for specifying configuration of any node type.

" + } + }, "NodeId": { "base": null, "refs": { @@ -1981,6 +1989,24 @@ "StartDomainMaintenanceRequest$NodeId": "

The ID of the data node.

" } }, + "NodeOption": { + "base": "

Container for specifying node type.

", + "refs": { + "NodeOptionsList$member": null + } + }, + "NodeOptionsList": { + "base": null, + "refs": { + "ClusterConfig$NodeOptions": "

List of node options for the domain.

" + } + }, + "NodeOptionsNodeType": { + "base": null, + "refs": { + "NodeOption$NodeType": "

Container for the node type, such as coordinator.
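The NodeOptions list added to ClusterConfig above surfaces in the Ruby gem as a node_options array of node_type/node_config hashes. A minimal sketch of enabling dedicated coordinator nodes on an existing domain follows; the domain name, region, instance type, and count are placeholder values, not recommendations.

require 'aws-sdk-opensearchservice'

client = Aws::OpenSearchService::Client.new(region: 'us-east-1')

# Enable two dedicated coordinator nodes via the new NodeOptions field;
# 'example-domain' and 'm5.large.search' are placeholders.
client.update_domain_config(
  domain_name: 'example-domain',
  cluster_config: {
    node_options: [
      {
        node_type: 'coordinator',
        node_config: { enabled: true, type: 'm5.large.search', count: 2 }
      }
    ]
  }
)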

" + } + }, "NodeStatus": { "base": null, "refs": { @@ -2071,6 +2097,7 @@ "DescribeInstanceTypeLimitsRequest$InstanceType": "

The OpenSearch Service instance type for which you need limit information.

", "DomainNodesStatus$InstanceType": "

The instance type information of the node.

", "InstanceTypeDetails$InstanceType": "

The instance type.

", + "NodeConfig$Type": "

The instance type of a particular node type in the cluster.

", "ReservedInstance$InstanceType": "

The OpenSearch instance type offered by the Reserved Instance offering.

", "ReservedInstanceOffering$InstanceType": "

The OpenSearch instance type offered by the Reserved Instance offering.

" } diff --git a/apis/rds/2014-10-31/api-2.json b/apis/rds/2014-10-31/api-2.json index ea130fdcb9e..a3872667ad7 100644 --- a/apis/rds/2014-10-31/api-2.json +++ b/apis/rds/2014-10-31/api-2.json @@ -9179,6 +9179,11 @@ "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfiguration"}, "NetworkType":{"shape":"String"}, "RdsCustomClusterConfiguration":{"shape":"RdsCustomClusterConfiguration"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"}, + "EnablePerformanceInsights":{"shape":"BooleanOptional"}, + "PerformanceInsightsKMSKeyId":{"shape":"String"}, + "PerformanceInsightsRetentionPeriod":{"shape":"IntegerOptional"}, "EngineLifecycleSupport":{"shape":"String"} } }, @@ -9221,6 +9226,11 @@ "NetworkType":{"shape":"String"}, "SourceDbClusterResourceId":{"shape":"String"}, "RdsCustomClusterConfiguration":{"shape":"RdsCustomClusterConfiguration"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"}, + "EnablePerformanceInsights":{"shape":"BooleanOptional"}, + "PerformanceInsightsKMSKeyId":{"shape":"String"}, + "PerformanceInsightsRetentionPeriod":{"shape":"IntegerOptional"}, "EngineLifecycleSupport":{"shape":"String"} } }, diff --git a/apis/rds/2014-10-31/docs-2.json b/apis/rds/2014-10-31/docs-2.json index b463569f5bb..280e431ebda 100644 --- a/apis/rds/2014-10-31/docs-2.json +++ b/apis/rds/2014-10-31/docs-2.json @@ -574,7 +574,7 @@ "CreateDBClusterMessage$PubliclyAccessible": "

Specifies whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Valid for Cluster Type: Multi-AZ DB clusters only

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

", "CreateDBClusterMessage$AutoMinorVersionUpgrade": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

Valid for Cluster Type: Multi-AZ DB clusters only

", "CreateDBClusterMessage$EnablePerformanceInsights": "

Specifies whether to turn on Performance Insights for the DB cluster.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

Valid for Cluster Type: Multi-AZ DB clusters only

", - "CreateDBClusterMessage$EnableLimitlessDatabase": "

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

", + "CreateDBClusterMessage$EnableLimitlessDatabase": "

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

This setting is no longer used. Instead use the ClusterScalabilityType setting.

", "CreateDBClusterMessage$ManageMasterUserPassword": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

", "CreateDBClusterMessage$EnableLocalWriteForwarding": "

Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

Valid for: Aurora DB clusters only

", "CreateDBInstanceMessage$MultiAZ": "

Specifies whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

This setting doesn't apply to the following DB instances:

", @@ -649,7 +649,7 @@ "ModifyDBClusterMessage$ManageMasterUserPassword": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

If the DB cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

If the DB cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

", "ModifyDBClusterMessage$RotateMasterUserPassword": "

Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

", "ModifyDBClusterMessage$EnableLocalWriteForwarding": "

Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

Valid for: Aurora DB clusters only

", - "ModifyDBClusterMessage$EnableLimitlessDatabase": "

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

", + "ModifyDBClusterMessage$EnableLimitlessDatabase": "

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

This setting is no longer used. Instead use the ClusterScalabilityType setting when you create your Aurora Limitless Database DB cluster.

", "ModifyDBInstanceMessage$MultiAZ": "

Specifies whether the DB instance is a Multi-AZ deployment. Changing this parameter doesn't result in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

This setting doesn't apply to RDS Custom DB instances.

", "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. An outage occurs when all the following conditions are met:

If any of the preceding conditions isn't met, Amazon RDS applies the change as soon as possible and doesn't cause an outage.

For an RDS Custom DB instance, don't enable this setting. Otherwise, the operation returns an error.

", "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied.

This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster.

", @@ -688,10 +688,12 @@ "RestoreDBClusterFromSnapshotMessage$DeletionProtection": "

Specifies whether to enable deletion protection for the DB cluster. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$CopyTagsToSnapshot": "

Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$PubliclyAccessible": "

Specifies whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterFromSnapshotMessage$EnablePerformanceInsights": "

Specifies whether to turn on Performance Insights for the DB cluster.

", "RestoreDBClusterToPointInTimeMessage$EnableIAMDatabaseAuthentication": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$DeletionProtection": "

Specifies whether to enable deletion protection for the DB cluster. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$CopyTagsToSnapshot": "

Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$PubliclyAccessible": "

Specifies whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

Valid for: Multi-AZ DB clusters only

", + "RestoreDBClusterToPointInTimeMessage$EnablePerformanceInsights": "

Specifies whether to turn on Performance Insights for the DB cluster.

", "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

Specifies whether the DB instance is a Multi-AZ deployment.

This setting doesn't apply to RDS Custom.

Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

", "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

", "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window.

If you restore an RDS Custom DB instance, you must disable this parameter.

", @@ -3131,8 +3133,12 @@ "RestoreDBClusterFromS3Message$Port": "

The port number on which the instances in the restored DB cluster accept connections.

Default: 3306

", "RestoreDBClusterFromSnapshotMessage$Port": "

The port number on which the new DB cluster accepts connections.

Constraints: This value must be 1150-65535

Default: The same port as the original DB cluster.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide.

Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB instance.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterFromSnapshotMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0.

If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

Default: 0

", + "RestoreDBClusterFromSnapshotMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data.

Valid Values:

Default: 7 days

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

", "RestoreDBClusterToPointInTimeMessage$Port": "

The port number on which the new DB cluster accepts connections.

Constraints: A value from 1150-65535.

Default: The default port for the engine.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide.

Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB instance.

Valid for: Multi-AZ DB clusters only

", + "RestoreDBClusterToPointInTimeMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0.

If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

Default: 0

", + "RestoreDBClusterToPointInTimeMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data.

Valid Values:

Default: 7 days

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

", "RestoreDBInstanceFromDBSnapshotMessage$Port": "

The port number on which the database accepts connections.

Default: The same port as the original DB instance

Constraints: Value must be 1150-65535

", "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts.

The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide.

Constraints: Must be an integer greater than 1000.

", "RestoreDBInstanceFromDBSnapshotMessage$StorageThroughput": "

Specifies the storage throughput value for the DB instance.

This setting doesn't apply to RDS Custom or Amazon Aurora.

", @@ -4628,7 +4634,7 @@ "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

The identifier of the event source to be added.

Constraints:

", "AddTagsToResourceMessage$ResourceName": "

The Amazon RDS resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

", "ApplyPendingMaintenanceActionMessage$ResourceIdentifier": "

The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

", - "ApplyPendingMaintenanceActionMessage$ApplyAction": "

The pending maintenance action to apply to this resource.

Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation

", + "ApplyPendingMaintenanceActionMessage$ApplyAction": "

The pending maintenance action to apply to this resource.

Valid Values:

For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS.
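For reference, the ApplyPendingMaintenanceAction wording above corresponds to the existing apply_pending_maintenance_action call in the Ruby client; a minimal sketch with a placeholder cluster ARN follows.

require 'aws-sdk-rds'

rds = Aws::RDS::Client.new(region: 'us-west-2')

# Apply a pending system-update at the next maintenance window.
# The resource ARN is a placeholder.
rds.apply_pending_maintenance_action(
  resource_identifier: 'arn:aws:rds:us-west-2:123456789012:cluster:example-cluster',
  apply_action: 'system-update',
  opt_in_type: 'next-maintenance'
)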

", "ApplyPendingMaintenanceActionMessage$OptInType": "

A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone.

Valid Values:

", "AttributeValueList$member": null, "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

The name of the DB security group to add authorization to.

", @@ -5455,7 +5461,7 @@ "Parameter$DataType": "

Specifies the valid data type for the parameter.

", "Parameter$AllowedValues": "

Specifies the valid range of values for the parameter.

", "Parameter$MinimumEngineVersion": "

The earliest engine version to which the parameter can apply.

", - "PendingMaintenanceAction$Action": "

The type of pending maintenance action that is available for the resource.

For more information about maintenance actions, see Maintaining a DB instance.

Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation

", + "PendingMaintenanceAction$Action": "

The type of pending maintenance action that is available for the resource.

For more information about maintenance actions, see Maintaining a DB instance.

Valid Values:

For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS.

", "PendingMaintenanceAction$OptInStatus": "

Indicates the type of opt-in request that has been received for the resource.

", "PendingMaintenanceAction$Description": "

A description providing more detail about the maintenance action.

", "PendingMaintenanceActionsMessage$Marker": "

An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

", @@ -5567,6 +5573,8 @@ "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", "RestoreDBClusterFromSnapshotMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$NetworkType": "

The network type of the DB cluster.

Valid Values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", + "RestoreDBClusterFromSnapshotMessage$MonitoringRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam::123456789012:role/emaccess.

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

", + "RestoreDBClusterFromSnapshotMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
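Taken together, the new MonitoringInterval, MonitoringRoleArn, EnablePerformanceInsights, PerformanceInsightsKMSKeyId, and PerformanceInsightsRetentionPeriod members let Enhanced Monitoring and Performance Insights be configured while restoring a Multi-AZ DB cluster from a snapshot. A minimal Ruby sketch follows; the identifiers, engine, instance class, and role ARN are placeholders and assume a pre-created monitoring role and snapshot.

require 'aws-sdk-rds'

rds = Aws::RDS::Client.new(region: 'us-west-2')

# Placeholder snapshot, cluster, and role; shows the new restore-time
# Enhanced Monitoring and Performance Insights parameters.
rds.restore_db_cluster_from_snapshot(
  db_cluster_identifier: 'restored-cluster',
  snapshot_identifier: 'example-cluster-snapshot',
  engine: 'mysql',
  db_cluster_instance_class: 'db.m6gd.xlarge',   # Multi-AZ DB clusters only
  monitoring_interval: 30,
  monitoring_role_arn: 'arn:aws:iam::123456789012:role/emaccess',
  enable_performance_insights: true,
  performance_insights_retention_period: 7
)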

", "RestoreDBClusterFromSnapshotMessage$EngineLifecycleSupport": "

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

The name of the new DB cluster to be created.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", @@ -5582,6 +5590,8 @@ "RestoreDBClusterToPointInTimeMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$NetworkType": "

The network type of the DB cluster.

Valid Values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "RestoreDBClusterToPointInTimeMessage$SourceDbClusterResourceId": "

The resource ID of the source DB cluster from which to restore.

", + "RestoreDBClusterToPointInTimeMessage$MonitoringRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam::123456789012:role/emaccess.

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

", + "RestoreDBClusterToPointInTimeMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

", "RestoreDBClusterToPointInTimeMessage$EngineLifecycleSupport": "

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

", "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

The name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-snapshot-id

", "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to restore from.

Constraints:

", diff --git a/apis/storagegateway/2013-06-30/docs-2.json b/apis/storagegateway/2013-06-30/docs-2.json index 9dc0dac141c..1ba57569d6f 100644 --- a/apis/storagegateway/2013-06-30/docs-2.json +++ b/apis/storagegateway/2013-06-30/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "Storage Gateway Service

Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the Storage Gateway Service API Reference:

Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer Storage Gateway volume and snapshot IDs coming in 2016.

", + "service": "Storage Gateway Service

Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post.

Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the Storage Gateway Service API Reference:

Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer Storage Gateway volume and snapshot IDs coming in 2016.

", "operations": { "ActivateGateway": "

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the Amazon Web Services Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots, the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account. For more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

", "AddCache": "

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape, and file gateway types (see How Storage Gateway works (architecture)).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

", @@ -248,13 +248,13 @@ "BandwidthDownloadRateLimit": { "base": null, "refs": { - "BandwidthRateLimitInterval$AverageDownloadRateLimitInBitsPerSec": "

The average download rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the download rate limit is not set.

", + "BandwidthRateLimitInterval$AverageDownloadRateLimitInBitsPerSec": "

The average download rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the download rate limit is not set.

S3 File Gateway does not support this feature.

", "DescribeBandwidthRateLimitOutput$AverageDownloadRateLimitInBitsPerSec": "

The average download bandwidth rate limit in bits per second. This field does not appear in the response if the download rate limit is not set.

", "UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec": "

The average download bandwidth rate limit in bits per second.

" } }, "BandwidthRateLimitInterval": { - "base": "

Describes a bandwidth rate limit interval for a gateway. A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.

", + "base": "

Describes a bandwidth rate limit interval for a gateway. A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.

FSx File Gateway does not support this feature.

", "refs": { "BandwidthRateLimitIntervals$member": null } @@ -275,7 +275,7 @@ "BandwidthUploadRateLimit": { "base": null, "refs": { - "BandwidthRateLimitInterval$AverageUploadRateLimitInBitsPerSec": "

The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set.

For Tape Gateway and Volume Gateway, the minimum value is 51200.

For S3 File Gateway and FSx File Gateway, the minimum value is 104857600.

", + "BandwidthRateLimitInterval$AverageUploadRateLimitInBitsPerSec": "

The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set.

For Tape Gateway and Volume Gateway, the minimum value is 51200.

This field is required for S3 File Gateway, and the minimum value is 104857600.

", "DescribeBandwidthRateLimitOutput$AverageUploadRateLimitInBitsPerSec": "

The average upload bandwidth rate limit in bits per second. This field does not appear in the response if the upload rate limit is not set.

", "UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec": "

The average upload bandwidth rate limit in bits per second.

" } @@ -1091,12 +1091,12 @@ "FileShareName": { "base": null, "refs": { - "CreateNFSFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

", - "CreateSMBFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

", + "CreateNFSFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _.

", + "CreateSMBFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,\",\\,/,|,?,*,+, or ASCII control characters 1-31.

", "NFSFileShareInfo$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

", "SMBFileShareInfo$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

", - "UpdateNFSFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

", - "UpdateSMBFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

" + "UpdateNFSFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _.

", + "UpdateSMBFileShareInput$FileShareName": "

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used.

A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,\",\\,/,|,?,*,+, or ASCII control characters 1-31.

" } }, "FileShareStatus": { @@ -1373,9 +1373,9 @@ "GatewayType": { "base": null, "refs": { - "ActivateGatewayInput$GatewayType": "

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB

", - "DescribeGatewayInformationOutput$GatewayType": "

The type of the gateway.

", - "GatewayInfo$GatewayType": "

The type of the gateway.

" + "ActivateGatewayInput$GatewayType": "

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post.

Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB

", + "DescribeGatewayInformationOutput$GatewayType": "

The type of the gateway.

Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post.

", + "GatewayInfo$GatewayType": "

The type of the gateway.

Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post.

" } }, "Gateways": { @@ -1623,10 +1623,10 @@ } }, "LocationARN": { - "base": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::my-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias

", + "base": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::amzn-s3-demo-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias

", "refs": { - "CreateNFSFileShareInput$LocationARN": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::my-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias

", - "CreateSMBFileShareInput$LocationARN": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::my-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias

", + "CreateNFSFileShareInput$LocationARN": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::amzn-s3-demo-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias

", + "CreateSMBFileShareInput$LocationARN": "

A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/).

You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples.

Bucket ARN:

arn:aws:s3:::amzn-s3-demo-bucket/prefix/

Access point ARN:

arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/

If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.

Access point alias:

test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias
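For context, a minimal Ruby sketch of passing the bucket-ARN-with-prefix form of LocationARN documented above when creating an SMB file share; the gateway ARN, IAM role, bucket, and share name are placeholders, and the share name follows the SMB character constraints noted earlier.

require 'securerandom'
require 'aws-sdk-storagegateway'

sgw = Aws::StorageGateway::Client.new(region: 'us-west-2')

# Placeholder gateway, role, and bucket; LocationARN uses a bucket ARN
# whose prefix ends with a forward slash, as required.
sgw.create_smb_file_share(
  client_token: SecureRandom.uuid,
  gateway_arn: 'arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B',
  role: 'arn:aws:iam::111122223333:role/StorageGatewayS3Access',
  location_arn: 'arn:aws:s3:::amzn-s3-demo-bucket/prefix/',
  file_share_name: 'team-share'
)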

", "NFSFileShareInfo$LocationARN": null, "SMBFileShareInfo$LocationARN": null } diff --git a/gems/aws-partitions/CHANGELOG.md b/gems/aws-partitions/CHANGELOG.md index 434b33b7755..8ab5c74e327 100644 --- a/gems/aws-partitions/CHANGELOG.md +++ b/gems/aws-partitions/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.997.0 (2024-10-28) +------------------ + +* Feature - Updated the partitions source data the determines the AWS service regions and endpoints. + 1.996.0 (2024-10-25) ------------------ diff --git a/gems/aws-partitions/VERSION b/gems/aws-partitions/VERSION index 603b4c6d775..f86f820b76a 100644 --- a/gems/aws-partitions/VERSION +++ b/gems/aws-partitions/VERSION @@ -1 +1 @@ -1.996.0 +1.997.0 diff --git a/gems/aws-partitions/partitions.json b/gems/aws-partitions/partitions.json index 622af11a634..5a517bed706 100644 --- a/gems/aws-partitions/partitions.json +++ b/gems/aws-partitions/partitions.json @@ -20404,6 +20404,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com", diff --git a/gems/aws-sdk-mediapackagev2/CHANGELOG.md b/gems/aws-sdk-mediapackagev2/CHANGELOG.md index edf9a515321..d3ac2bb43fb 100644 --- a/gems/aws-sdk-mediapackagev2/CHANGELOG.md +++ b/gems/aws-sdk-mediapackagev2/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.31.0 (2024-10-28) +------------------ + +* Feature - MediaPackage V2 Live to VOD Harvester is a MediaPackage V2 feature, which is used to export content from an origin endpoint to a S3 bucket. + 1.30.0 (2024-10-18) ------------------ diff --git a/gems/aws-sdk-mediapackagev2/VERSION b/gems/aws-sdk-mediapackagev2/VERSION index 034552a83ee..34aae156b19 100644 --- a/gems/aws-sdk-mediapackagev2/VERSION +++ b/gems/aws-sdk-mediapackagev2/VERSION @@ -1 +1 @@ -1.30.0 +1.31.0 diff --git a/gems/aws-sdk-mediapackagev2/aws-sdk-mediapackagev2.gemspec b/gems/aws-sdk-mediapackagev2/aws-sdk-mediapackagev2.gemspec index a5def40adb4..2d8b48dbc85 100644 --- a/gems/aws-sdk-mediapackagev2/aws-sdk-mediapackagev2.gemspec +++ b/gems/aws-sdk-mediapackagev2/aws-sdk-mediapackagev2.gemspec @@ -26,7 +26,7 @@ Gem::Specification.new do |spec| } spec.add_dependency('aws-sdk-core', '~> 3', '>= 3.210.0') - spec.add_dependency('aws-sigv4', '~> 1.1') + spec.add_dependency('aws-sigv4', '~> 1.5') spec.required_ruby_version = '>= 2.5' end diff --git a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2.rb b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2.rb index 7ed27cbabeb..68ec816b1c0 100644 --- a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2.rb +++ b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2.rb @@ -23,7 +23,7 @@ # structure. # # media_package_v2 = Aws::MediaPackageV2::Client.new -# resp = media_package_v2.create_channel(params) +# resp = media_package_v2.cancel_harvest_job(params) # # See {Client} for more information. 
# @@ -55,7 +55,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-mediapackagev2/endpoint_provider' autoload :Endpoints, 'aws-sdk-mediapackagev2/endpoints' - GEM_VERSION = '1.30.0' + GEM_VERSION = '1.31.0' end diff --git a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client.rb b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client.rb index e82da04b558..5fc1a702b79 100644 --- a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client.rb +++ b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client.rb @@ -447,6 +447,62 @@ def initialize(*args) # @!group API Operations + # Cancels an in-progress harvest job. + # + # @option params [required, String] :channel_group_name + # The name of the channel group containing the channel from which the + # harvest job is running. + # + # @option params [required, String] :channel_name + # The name of the channel from which the harvest job is running. + # + # @option params [required, String] :origin_endpoint_name + # The name of the origin endpoint that the harvest job is harvesting + # from. This cannot be changed after the harvest job is submitted. + # + # @option params [required, String] :harvest_job_name + # The name of the harvest job to cancel. This name must be unique within + # the channel and cannot be changed after the harvest job is submitted. + # + # @option params [String] :etag + # The current Entity Tag (ETag) associated with the harvest job. Used + # for concurrency control. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: Cancel a Harvest Job + # + # resp = client.cancel_harvest_job({ + # channel_group_name: "exampleChannelGroup", + # channel_name: "exampleChannelName", + # harvest_job_name: "HarvestJobName", + # origin_endpoint_name: "exampleOriginEndpointName", + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.cancel_harvest_job({ + # channel_group_name: "ResourceName", # required + # channel_name: "ResourceName", # required + # origin_endpoint_name: "ResourceName", # required + # harvest_job_name: "ResourceName", # required + # etag: "EntityTag", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CancelHarvestJob AWS API Documentation + # + # @overload cancel_harvest_job(params = {}) + # @param [Hash] params ({}) + def cancel_harvest_job(params = {}, options = {}) + req = build_request(:cancel_harvest_job, params) + req.send_request(options) + end + # Create a channel to start receiving content streams. The channel # represents the input to MediaPackage for incoming live content from an # encoder such as AWS Elemental MediaLive. The channel receives content, @@ -692,6 +748,222 @@ def create_channel_group(params = {}, options = {}) req.send_request(options) end + # Creates a new harvest job to export content from a MediaPackage v2 + # channel to an S3 bucket. + # + # @option params [required, String] :channel_group_name + # The name of the channel group containing the channel from which to + # harvest content. + # + # @option params [required, String] :channel_name + # The name of the channel from which to harvest content. + # + # @option params [required, String] :origin_endpoint_name + # The name of the origin endpoint from which to harvest content. + # + # @option params [String] :description + # An optional description for the harvest job. 
+ # + # @option params [required, Types::HarvestedManifests] :harvested_manifests + # A list of manifests to be harvested. + # + # @option params [required, Types::HarvesterScheduleConfiguration] :schedule_configuration + # The configuration for when the harvest job should run, including start + # and end times. + # + # @option params [required, Types::Destination] :destination + # The S3 destination where the harvested content will be placed. + # + # @option params [String] :client_token + # A unique, case-sensitive identifier that you provide to ensure the + # idempotency of the request. + # + # **A suitable default value is auto-generated.** You should normally + # not need to pass this option.** + # + # @option params [String] :harvest_job_name + # A name for the harvest job. This name must be unique within the + # channel. + # + # @option params [Hash] :tags + # A collection of tags associated with the harvest job. + # + # @return [Types::CreateHarvestJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateHarvestJobResponse#channel_group_name #channel_group_name} => String + # * {Types::CreateHarvestJobResponse#channel_name #channel_name} => String + # * {Types::CreateHarvestJobResponse#origin_endpoint_name #origin_endpoint_name} => String + # * {Types::CreateHarvestJobResponse#destination #destination} => Types::Destination + # * {Types::CreateHarvestJobResponse#harvest_job_name #harvest_job_name} => String + # * {Types::CreateHarvestJobResponse#harvested_manifests #harvested_manifests} => Types::HarvestedManifests + # * {Types::CreateHarvestJobResponse#description #description} => String + # * {Types::CreateHarvestJobResponse#schedule_configuration #schedule_configuration} => Types::HarvesterScheduleConfiguration + # * {Types::CreateHarvestJobResponse#arn #arn} => String + # * {Types::CreateHarvestJobResponse#created_at #created_at} => Time + # * {Types::CreateHarvestJobResponse#modified_at #modified_at} => Time + # * {Types::CreateHarvestJobResponse#status #status} => String + # * {Types::CreateHarvestJobResponse#error_message #error_message} => String + # * {Types::CreateHarvestJobResponse#etag #etag} => String + # * {Types::CreateHarvestJobResponse#tags #tags} => Hash<String,String> + # + # + # @example Example: Creating a Harvest Job + # + # resp = client.create_harvest_job({ + # channel_group_name: "exampleChannelGroup", + # channel_name: "exampleChannelName", + # description: "Example HarvestJob description", + # destination: { + # s3_destination: { + # bucket_name: "harvestJobS3DestinationBucket", + # destination_path: "manifests", + # }, + # }, + # harvested_manifests: { + # dash_manifests: [ + # { + # manifest_name: "DashManifest", + # }, + # ], + # hls_manifests: [ + # { + # manifest_name: "HlsManifest", + # }, + # ], + # low_latency_hls_manifests: [ + # { + # manifest_name: "LowLatencyHlsManifest", + # }, + # ], + # }, + # origin_endpoint_name: "exampleOriginEndpointName", + # schedule_configuration: { + # end_time: Time.parse("2024-05-28T12:00:00.00Z"), + # start_time: Time.parse("2024-05-28T06:00:00.00Z"), + # }, + # }) + # + # resp.to_h outputs the following: + # { + # arn: "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + # channel_group_name: "exampleChannelGroup", + # channel_name: "exampleChannelName", + # created_at: Time.parse("2024-05-28T09:36:00.00Z"), + # 
description: "Example HarvestJob description", + # destination: { + # s3_destination: { + # bucket_name: "harvestJobS3DestinationBucket", + # destination_path: "manifests", + # }, + # }, + # etag: "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + # harvest_job_name: "HarvestJobName", + # harvested_manifests: { + # dash_manifests: [ + # { + # manifest_name: "DashManifest", + # }, + # ], + # hls_manifests: [ + # { + # manifest_name: "HlsManifest", + # }, + # ], + # low_latency_hls_manifests: [ + # { + # manifest_name: "LowLatencyHlsManifest", + # }, + # ], + # }, + # modified_at: Time.parse("2024-05-28T09:36:00.00Z"), + # origin_endpoint_name: "exampleOriginEndpointName", + # schedule_configuration: { + # end_time: Time.parse("2024-05-28T12:00:00.00Z"), + # start_time: Time.parse("2024-05-28T06:00:00.00Z"), + # }, + # status: "QUEUED", + # tags: { + # "key1" => "value1", + # "key2" => "value2", + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.create_harvest_job({ + # channel_group_name: "ResourceName", # required + # channel_name: "ResourceName", # required + # origin_endpoint_name: "ResourceName", # required + # description: "ResourceDescription", + # harvested_manifests: { # required + # hls_manifests: [ + # { + # manifest_name: "ResourceName", # required + # }, + # ], + # dash_manifests: [ + # { + # manifest_name: "ResourceName", # required + # }, + # ], + # low_latency_hls_manifests: [ + # { + # manifest_name: "ResourceName", # required + # }, + # ], + # }, + # schedule_configuration: { # required + # start_time: Time.now, # required + # end_time: Time.now, # required + # }, + # destination: { # required + # s3_destination: { # required + # bucket_name: "S3BucketName", # required + # destination_path: "S3DestinationPath", # required + # }, + # }, + # client_token: "IdempotencyToken", + # harvest_job_name: "ResourceName", + # tags: { + # "TagKey" => "TagValue", + # }, + # }) + # + # @example Response structure + # + # resp.channel_group_name #=> String + # resp.channel_name #=> String + # resp.origin_endpoint_name #=> String + # resp.destination.s3_destination.bucket_name #=> String + # resp.destination.s3_destination.destination_path #=> String + # resp.harvest_job_name #=> String + # resp.harvested_manifests.hls_manifests #=> Array + # resp.harvested_manifests.hls_manifests[0].manifest_name #=> String + # resp.harvested_manifests.dash_manifests #=> Array + # resp.harvested_manifests.dash_manifests[0].manifest_name #=> String + # resp.harvested_manifests.low_latency_hls_manifests #=> Array + # resp.harvested_manifests.low_latency_hls_manifests[0].manifest_name #=> String + # resp.description #=> String + # resp.schedule_configuration.start_time #=> Time + # resp.schedule_configuration.end_time #=> Time + # resp.arn #=> String + # resp.created_at #=> Time + # resp.modified_at #=> Time + # resp.status #=> String, one of "QUEUED", "IN_PROGRESS", "CANCELLED", "COMPLETED", "FAILED" + # resp.error_message #=> String + # resp.etag #=> String + # resp.tags #=> Hash + # resp.tags["TagKey"] #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CreateHarvestJob AWS API Documentation + # + # @overload create_harvest_job(params = {}) + # @param [Hash] params ({}) + def create_harvest_job(params = {}, options = {}) + req = build_request(:create_harvest_job, params) + req.send_request(options) + end + # The endpoint is attached to a channel, and represents the output of # the live content. 
You can associate multiple endpoints to a single # channel. Each endpoint gives players and downstream CDNs (such as @@ -1894,6 +2166,143 @@ def get_channel_policy(params = {}, options = {}) req.send_request(options) end + # Retrieves the details of a specific harvest job. + # + # @option params [required, String] :channel_group_name + # The name of the channel group containing the channel associated with + # the harvest job. + # + # @option params [required, String] :channel_name + # The name of the channel associated with the harvest job. + # + # @option params [required, String] :origin_endpoint_name + # The name of the origin endpoint associated with the harvest job. + # + # @option params [required, String] :harvest_job_name + # The name of the harvest job to retrieve. + # + # @return [Types::GetHarvestJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetHarvestJobResponse#channel_group_name #channel_group_name} => String + # * {Types::GetHarvestJobResponse#channel_name #channel_name} => String + # * {Types::GetHarvestJobResponse#origin_endpoint_name #origin_endpoint_name} => String + # * {Types::GetHarvestJobResponse#destination #destination} => Types::Destination + # * {Types::GetHarvestJobResponse#harvest_job_name #harvest_job_name} => String + # * {Types::GetHarvestJobResponse#harvested_manifests #harvested_manifests} => Types::HarvestedManifests + # * {Types::GetHarvestJobResponse#description #description} => String + # * {Types::GetHarvestJobResponse#schedule_configuration #schedule_configuration} => Types::HarvesterScheduleConfiguration + # * {Types::GetHarvestJobResponse#arn #arn} => String + # * {Types::GetHarvestJobResponse#created_at #created_at} => Time + # * {Types::GetHarvestJobResponse#modified_at #modified_at} => Time + # * {Types::GetHarvestJobResponse#status #status} => String + # * {Types::GetHarvestJobResponse#error_message #error_message} => String + # * {Types::GetHarvestJobResponse#etag #etag} => String + # * {Types::GetHarvestJobResponse#tags #tags} => Hash<String,String> + # + # + # @example Example: Getting a Harvest Job + # + # resp = client.get_harvest_job({ + # channel_group_name: "exampleChannelGroup", + # channel_name: "exampleChannelName", + # harvest_job_name: "HarvestJobName", + # origin_endpoint_name: "exampleOriginEndpointName", + # }) + # + # resp.to_h outputs the following: + # { + # arn: "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannelName/originEndpoint/exampleOriginEndpointName/harvestJob/HarvestJobName", + # channel_group_name: "exampleChannelGroup", + # channel_name: "exampleChannelName", + # created_at: Time.parse("2024-05-28T09:36:00.00Z"), + # description: "Example HarvestJob description", + # destination: { + # s3_destination: { + # bucket_name: "harvestJobS3DestinationBucket", + # destination_path: "manifests", + # }, + # }, + # etag: "GlfT+dwAyGIR4wuy8nKWl1RDPwSrjQej9qUutLZxoxk=", + # harvest_job_name: "HarvestJobName", + # harvested_manifests: { + # dash_manifests: [ + # { + # manifest_name: "DashManifest", + # }, + # ], + # hls_manifests: [ + # { + # manifest_name: "HlsManifest", + # }, + # ], + # low_latency_hls_manifests: [ + # { + # manifest_name: "LowLatencyHlsManifest", + # }, + # ], + # }, + # modified_at: Time.parse("2024-05-28T09:36:00.00Z"), + # origin_endpoint_name: "exampleOriginEndpointName", + # schedule_configuration: { + # end_time: Time.parse("2024-05-28T12:00:00.00Z"), + # start_time: 
Time.parse("2024-05-28T06:00:00.00Z"), + # }, + # status: "QUEUED", + # tags: { + # "key1" => "value1", + # "key2" => "value2", + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_harvest_job({ + # channel_group_name: "ResourceName", # required + # channel_name: "ResourceName", # required + # origin_endpoint_name: "ResourceName", # required + # harvest_job_name: "ResourceName", # required + # }) + # + # @example Response structure + # + # resp.channel_group_name #=> String + # resp.channel_name #=> String + # resp.origin_endpoint_name #=> String + # resp.destination.s3_destination.bucket_name #=> String + # resp.destination.s3_destination.destination_path #=> String + # resp.harvest_job_name #=> String + # resp.harvested_manifests.hls_manifests #=> Array + # resp.harvested_manifests.hls_manifests[0].manifest_name #=> String + # resp.harvested_manifests.dash_manifests #=> Array + # resp.harvested_manifests.dash_manifests[0].manifest_name #=> String + # resp.harvested_manifests.low_latency_hls_manifests #=> Array + # resp.harvested_manifests.low_latency_hls_manifests[0].manifest_name #=> String + # resp.description #=> String + # resp.schedule_configuration.start_time #=> Time + # resp.schedule_configuration.end_time #=> Time + # resp.arn #=> String + # resp.created_at #=> Time + # resp.modified_at #=> Time + # resp.status #=> String, one of "QUEUED", "IN_PROGRESS", "CANCELLED", "COMPLETED", "FAILED" + # resp.error_message #=> String + # resp.etag #=> String + # resp.tags #=> Hash + # resp.tags["TagKey"] #=> String + # + # + # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): + # + # * harvest_job_finished + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/GetHarvestJob AWS API Documentation + # + # @overload get_harvest_job(params = {}) + # @param [Hash] params ({}) + def get_harvest_job(params = {}, options = {}) + req = build_request(:get_harvest_job, params) + req.send_request(options) + end + # Retrieves the specified origin endpoint that's configured in AWS # Elemental MediaPackage to obtain its playback URL and to view the # packaging settings that it's currently using. @@ -2357,6 +2766,87 @@ def list_channels(params = {}, options = {}) req.send_request(options) end + # Retrieves a list of harvest jobs that match the specified criteria. + # + # @option params [required, String] :channel_group_name + # The name of the channel group to filter the harvest jobs by. If + # specified, only harvest jobs associated with channels in this group + # will be returned. + # + # @option params [String] :channel_name + # The name of the channel to filter the harvest jobs by. If specified, + # only harvest jobs associated with this channel will be returned. + # + # @option params [String] :origin_endpoint_name + # The name of the origin endpoint to filter the harvest jobs by. If + # specified, only harvest jobs associated with this origin endpoint will + # be returned. + # + # @option params [String] :status + # The status to filter the harvest jobs by. If specified, only harvest + # jobs with this status will be returned. + # + # @option params [Integer] :max_results + # The maximum number of harvest jobs to return in a single request. If + # not specified, a default value will be used. + # + # @option params [String] :next_token + # A token used for pagination. Provide this value in subsequent requests + # to retrieve the next set of results. 
+ # + # @return [Types::ListHarvestJobsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListHarvestJobsResponse#items #items} => Array<Types::HarvestJob> + # * {Types::ListHarvestJobsResponse#next_token #next_token} => String + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # @example Request syntax with placeholder values + # + # resp = client.list_harvest_jobs({ + # channel_group_name: "ResourceName", # required + # channel_name: "ListHarvestJobsRequestChannelNameString", + # origin_endpoint_name: "ListHarvestJobsRequestOriginEndpointNameString", + # status: "QUEUED", # accepts QUEUED, IN_PROGRESS, CANCELLED, COMPLETED, FAILED + # max_results: 1, + # next_token: "String", + # }) + # + # @example Response structure + # + # resp.items #=> Array + # resp.items[0].channel_group_name #=> String + # resp.items[0].channel_name #=> String + # resp.items[0].origin_endpoint_name #=> String + # resp.items[0].destination.s3_destination.bucket_name #=> String + # resp.items[0].destination.s3_destination.destination_path #=> String + # resp.items[0].harvest_job_name #=> String + # resp.items[0].harvested_manifests.hls_manifests #=> Array + # resp.items[0].harvested_manifests.hls_manifests[0].manifest_name #=> String + # resp.items[0].harvested_manifests.dash_manifests #=> Array + # resp.items[0].harvested_manifests.dash_manifests[0].manifest_name #=> String + # resp.items[0].harvested_manifests.low_latency_hls_manifests #=> Array + # resp.items[0].harvested_manifests.low_latency_hls_manifests[0].manifest_name #=> String + # resp.items[0].description #=> String + # resp.items[0].schedule_configuration.start_time #=> Time + # resp.items[0].schedule_configuration.end_time #=> Time + # resp.items[0].arn #=> String + # resp.items[0].created_at #=> Time + # resp.items[0].modified_at #=> Time + # resp.items[0].status #=> String, one of "QUEUED", "IN_PROGRESS", "CANCELLED", "COMPLETED", "FAILED" + # resp.items[0].error_message #=> String + # resp.items[0].etag #=> String + # resp.next_token #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/ListHarvestJobs AWS API Documentation + # + # @overload list_harvest_jobs(params = {}) + # @param [Hash] params ({}) + def list_harvest_jobs(params = {}, options = {}) + req = build_request(:list_harvest_jobs, params) + req.send_request(options) + end + # Retrieves all origin endpoints in a specific channel that are # configured in AWS Elemental MediaPackage. # @@ -3464,14 +3954,127 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-mediapackagev2' - context[:gem_version] = '1.30.0' + context[:gem_version] = '1.31.0' Seahorse::Client::Request.new(handlers, context) end + # Polls an API operation until a resource enters a desired state. + # + # ## Basic Usage + # + # A waiter will call an API operation until: + # + # * It is successful + # * It enters a terminal state + # * It makes the maximum number of attempts + # + # In between attempts, the waiter will sleep. + # + # # polls in a loop, sleeping between attempts + # client.wait_until(waiter_name, params) + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. You can pass + # configuration as the final arguments hash. 
+ # + # # poll for ~25 seconds + # client.wait_until(waiter_name, params, { + # max_attempts: 5, + # delay: 5, + # }) + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # client.wait_until(waiter_name, params, { + # + # # disable max attempts + # max_attempts: nil, + # + # # poll for 1 hour, instead of a number of attempts + # before_wait: -> (attempts, response) do + # throw :failure if Time.now - started_at > 3600 + # end + # }) + # + # ## Handling Errors + # + # When a waiter is unsuccessful, it will raise an error. + # All of the failure errors extend from + # {Aws::Waiters::Errors::WaiterFailed}. + # + # begin + # client.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # ## Valid Waiters + # + # The following table lists the valid waiter names, the operations they call, + # and the default `:delay` and `:max_attempts` values. + # + # | waiter_name | params | :delay | :max_attempts | + # | -------------------- | ------------------------ | -------- | ------------- | + # | harvest_job_finished | {Client#get_harvest_job} | 2 | 60 | + # + # @raise [Errors::FailureStateError] Raised when the waiter terminates + # because the waiter has entered a state that it will not transition + # out of, preventing success. + # + # @raise [Errors::TooManyAttemptsError] Raised when the configured + # maximum number of attempts have been made, and the waiter is not + # yet successful. + # + # @raise [Errors::UnexpectedError] Raised when an error is encounted + # while polling for a resource that is not expected. + # + # @raise [Errors::NoSuchWaiterError] Raised when you request to wait + # for an unknown state. + # + # @return [Boolean] Returns `true` if the waiter was successful. + # @param [Symbol] waiter_name + # @param [Hash] params ({}) + # @param [Hash] options ({}) + # @option options [Integer] :max_attempts + # @option options [Integer] :delay + # @option options [Proc] :before_attempt + # @option options [Proc] :before_wait + def wait_until(waiter_name, params = {}, options = {}) + w = waiter(waiter_name, options) + yield(w.waiter) if block_given? 
# deprecated + w.wait(params) + end + # @api private # @deprecated def waiter_names - [] + waiters.keys + end + + private + + # @param [Symbol] waiter_name + # @param [Hash] options ({}) + def waiter(waiter_name, options = {}) + waiter_class = waiters[waiter_name] + if waiter_class + waiter_class.new(options.merge(client: self)) + else + raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys) + end + end + + def waiters + { + harvest_job_finished: Waiters::HarvestJobFinished + } end class << self diff --git a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client_api.rb b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client_api.rb index 6a22f9f7209..3dad5c4ad5b 100644 --- a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client_api.rb +++ b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/client_api.rb @@ -18,6 +18,8 @@ module ClientApi AdMarkerDash = Shapes::StringShape.new(name: 'AdMarkerDash') AdMarkerHls = Shapes::StringShape.new(name: 'AdMarkerHls') Boolean = Shapes::BooleanShape.new(name: 'Boolean') + CancelHarvestJobRequest = Shapes::StructureShape.new(name: 'CancelHarvestJobRequest') + CancelHarvestJobResponse = Shapes::StructureShape.new(name: 'CancelHarvestJobResponse') ChannelGroupListConfiguration = Shapes::StructureShape.new(name: 'ChannelGroupListConfiguration') ChannelGroupsList = Shapes::ListShape.new(name: 'ChannelGroupsList') ChannelList = Shapes::ListShape.new(name: 'ChannelList') @@ -36,6 +38,8 @@ module ClientApi CreateDashManifestConfigurationMinUpdatePeriodSecondsInteger = Shapes::IntegerShape.new(name: 'CreateDashManifestConfigurationMinUpdatePeriodSecondsInteger') CreateDashManifestConfigurationSuggestedPresentationDelaySecondsInteger = Shapes::IntegerShape.new(name: 'CreateDashManifestConfigurationSuggestedPresentationDelaySecondsInteger') CreateDashManifests = Shapes::ListShape.new(name: 'CreateDashManifests') + CreateHarvestJobRequest = Shapes::StructureShape.new(name: 'CreateHarvestJobRequest') + CreateHarvestJobResponse = Shapes::StructureShape.new(name: 'CreateHarvestJobResponse') CreateHlsManifestConfiguration = Shapes::StructureShape.new(name: 'CreateHlsManifestConfiguration') CreateHlsManifestConfigurationManifestWindowSecondsInteger = Shapes::IntegerShape.new(name: 'CreateHlsManifestConfigurationManifestWindowSecondsInteger') CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger = Shapes::IntegerShape.new(name: 'CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger') @@ -64,6 +68,7 @@ module ClientApi DeleteOriginEndpointPolicyResponse = Shapes::StructureShape.new(name: 'DeleteOriginEndpointPolicyResponse') DeleteOriginEndpointRequest = Shapes::StructureShape.new(name: 'DeleteOriginEndpointRequest') DeleteOriginEndpointResponse = Shapes::StructureShape.new(name: 'DeleteOriginEndpointResponse') + Destination = Shapes::StructureShape.new(name: 'Destination') DrmSystem = Shapes::StringShape.new(name: 'DrmSystem') Encryption = Shapes::StructureShape.new(name: 'Encryption') EncryptionConstantInitializationVectorString = Shapes::StringShape.new(name: 'EncryptionConstantInitializationVectorString') @@ -86,6 +91,8 @@ module ClientApi GetChannelResponse = Shapes::StructureShape.new(name: 'GetChannelResponse') GetDashManifestConfiguration = Shapes::StructureShape.new(name: 'GetDashManifestConfiguration') GetDashManifests = Shapes::ListShape.new(name: 'GetDashManifests') + GetHarvestJobRequest = Shapes::StructureShape.new(name: 'GetHarvestJobRequest') + GetHarvestJobResponse = 
Shapes::StructureShape.new(name: 'GetHarvestJobResponse') GetHlsManifestConfiguration = Shapes::StructureShape.new(name: 'GetHlsManifestConfiguration') GetHlsManifests = Shapes::ListShape.new(name: 'GetHlsManifests') GetLowLatencyHlsManifestConfiguration = Shapes::StructureShape.new(name: 'GetLowLatencyHlsManifestConfiguration') @@ -94,6 +101,17 @@ module ClientApi GetOriginEndpointPolicyResponse = Shapes::StructureShape.new(name: 'GetOriginEndpointPolicyResponse') GetOriginEndpointRequest = Shapes::StructureShape.new(name: 'GetOriginEndpointRequest') GetOriginEndpointResponse = Shapes::StructureShape.new(name: 'GetOriginEndpointResponse') + HarvestJob = Shapes::StructureShape.new(name: 'HarvestJob') + HarvestJobStatus = Shapes::StringShape.new(name: 'HarvestJobStatus') + HarvestJobsList = Shapes::ListShape.new(name: 'HarvestJobsList') + HarvestedDashManifest = Shapes::StructureShape.new(name: 'HarvestedDashManifest') + HarvestedDashManifestsList = Shapes::ListShape.new(name: 'HarvestedDashManifestsList') + HarvestedHlsManifest = Shapes::StructureShape.new(name: 'HarvestedHlsManifest') + HarvestedHlsManifestsList = Shapes::ListShape.new(name: 'HarvestedHlsManifestsList') + HarvestedLowLatencyHlsManifest = Shapes::StructureShape.new(name: 'HarvestedLowLatencyHlsManifest') + HarvestedLowLatencyHlsManifestsList = Shapes::ListShape.new(name: 'HarvestedLowLatencyHlsManifestsList') + HarvestedManifests = Shapes::StructureShape.new(name: 'HarvestedManifests') + HarvesterScheduleConfiguration = Shapes::StructureShape.new(name: 'HarvesterScheduleConfiguration') IdempotencyToken = Shapes::StringShape.new(name: 'IdempotencyToken') IngestEndpoint = Shapes::StructureShape.new(name: 'IngestEndpoint') IngestEndpointList = Shapes::ListShape.new(name: 'IngestEndpointList') @@ -106,6 +124,11 @@ module ClientApi ListChannelsResponse = Shapes::StructureShape.new(name: 'ListChannelsResponse') ListDashManifestConfiguration = Shapes::StructureShape.new(name: 'ListDashManifestConfiguration') ListDashManifests = Shapes::ListShape.new(name: 'ListDashManifests') + ListHarvestJobsRequest = Shapes::StructureShape.new(name: 'ListHarvestJobsRequest') + ListHarvestJobsRequestChannelNameString = Shapes::StringShape.new(name: 'ListHarvestJobsRequestChannelNameString') + ListHarvestJobsRequestMaxResultsInteger = Shapes::IntegerShape.new(name: 'ListHarvestJobsRequestMaxResultsInteger') + ListHarvestJobsRequestOriginEndpointNameString = Shapes::StringShape.new(name: 'ListHarvestJobsRequestOriginEndpointNameString') + ListHarvestJobsResponse = Shapes::StructureShape.new(name: 'ListHarvestJobsResponse') ListHlsManifestConfiguration = Shapes::StructureShape.new(name: 'ListHlsManifestConfiguration') ListHlsManifests = Shapes::ListShape.new(name: 'ListHlsManifests') ListLowLatencyHlsManifestConfiguration = Shapes::StructureShape.new(name: 'ListLowLatencyHlsManifestConfiguration') @@ -129,6 +152,9 @@ module ClientApi ResourceName = Shapes::StringShape.new(name: 'ResourceName') ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException') ResourceTypeNotFound = Shapes::StringShape.new(name: 'ResourceTypeNotFound') + S3BucketName = Shapes::StringShape.new(name: 'S3BucketName') + S3DestinationConfig = Shapes::StructureShape.new(name: 'S3DestinationConfig') + S3DestinationPath = Shapes::StringShape.new(name: 'S3DestinationPath') Scte = Shapes::StructureShape.new(name: 'Scte') ScteDash = Shapes::StructureShape.new(name: 'ScteDash') ScteFilter = Shapes::StringShape.new(name: 'ScteFilter') @@ -168,6 +194,15 @@ 
module ClientApi AccessDeniedException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) AccessDeniedException.struct_class = Types::AccessDeniedException + CancelHarvestJobRequest.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelGroupName")) + CancelHarvestJobRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelName")) + CancelHarvestJobRequest.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "OriginEndpointName")) + CancelHarvestJobRequest.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "HarvestJobName")) + CancelHarvestJobRequest.add_member(:etag, Shapes::ShapeRef.new(shape: EntityTag, location: "header", location_name: "x-amzn-update-if-match")) + CancelHarvestJobRequest.struct_class = Types::CancelHarvestJobRequest + + CancelHarvestJobResponse.struct_class = Types::CancelHarvestJobResponse + ChannelGroupListConfiguration.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ChannelGroupName")) ChannelGroupListConfiguration.add_member(:arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "Arn")) ChannelGroupListConfiguration.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "CreatedAt")) @@ -243,6 +278,35 @@ module ClientApi CreateDashManifests.member = Shapes::ShapeRef.new(shape: CreateDashManifestConfiguration) + CreateHarvestJobRequest.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelGroupName")) + CreateHarvestJobRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelName")) + CreateHarvestJobRequest.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "OriginEndpointName")) + CreateHarvestJobRequest.add_member(:description, Shapes::ShapeRef.new(shape: ResourceDescription, location_name: "Description")) + CreateHarvestJobRequest.add_member(:harvested_manifests, Shapes::ShapeRef.new(shape: HarvestedManifests, required: true, location_name: "HarvestedManifests")) + CreateHarvestJobRequest.add_member(:schedule_configuration, Shapes::ShapeRef.new(shape: HarvesterScheduleConfiguration, required: true, location_name: "ScheduleConfiguration")) + CreateHarvestJobRequest.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination")) + CreateHarvestJobRequest.add_member(:client_token, Shapes::ShapeRef.new(shape: IdempotencyToken, location: "header", location_name: "x-amzn-client-token", metadata: {"idempotencyToken"=>true})) + CreateHarvestJobRequest.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, location_name: "HarvestJobName")) + CreateHarvestJobRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "Tags")) + CreateHarvestJobRequest.struct_class = Types::CreateHarvestJobRequest + + CreateHarvestJobResponse.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ChannelGroupName")) + CreateHarvestJobResponse.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, 
required: true, location_name: "ChannelName")) + CreateHarvestJobResponse.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "OriginEndpointName")) + CreateHarvestJobResponse.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination")) + CreateHarvestJobResponse.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "HarvestJobName")) + CreateHarvestJobResponse.add_member(:harvested_manifests, Shapes::ShapeRef.new(shape: HarvestedManifests, required: true, location_name: "HarvestedManifests")) + CreateHarvestJobResponse.add_member(:description, Shapes::ShapeRef.new(shape: ResourceDescription, location_name: "Description")) + CreateHarvestJobResponse.add_member(:schedule_configuration, Shapes::ShapeRef.new(shape: HarvesterScheduleConfiguration, required: true, location_name: "ScheduleConfiguration")) + CreateHarvestJobResponse.add_member(:arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "Arn")) + CreateHarvestJobResponse.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "CreatedAt")) + CreateHarvestJobResponse.add_member(:modified_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "ModifiedAt")) + CreateHarvestJobResponse.add_member(:status, Shapes::ShapeRef.new(shape: HarvestJobStatus, required: true, location_name: "Status")) + CreateHarvestJobResponse.add_member(:error_message, Shapes::ShapeRef.new(shape: String, location_name: "ErrorMessage")) + CreateHarvestJobResponse.add_member(:etag, Shapes::ShapeRef.new(shape: EntityTag, location_name: "ETag")) + CreateHarvestJobResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "Tags")) + CreateHarvestJobResponse.struct_class = Types::CreateHarvestJobResponse + CreateHlsManifestConfiguration.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ManifestName, required: true, location_name: "ManifestName")) CreateHlsManifestConfiguration.add_member(:child_manifest_name, Shapes::ShapeRef.new(shape: ManifestName, location_name: "ChildManifestName")) CreateHlsManifestConfiguration.add_member(:scte_hls, Shapes::ShapeRef.new(shape: ScteHls, location_name: "ScteHls")) @@ -335,6 +399,9 @@ module ClientApi DeleteOriginEndpointResponse.struct_class = Types::DeleteOriginEndpointResponse + Destination.add_member(:s3_destination, Shapes::ShapeRef.new(shape: S3DestinationConfig, required: true, location_name: "S3Destination")) + Destination.struct_class = Types::Destination + Encryption.add_member(:constant_initialization_vector, Shapes::ShapeRef.new(shape: EncryptionConstantInitializationVectorString, location_name: "ConstantInitializationVector")) Encryption.add_member(:encryption_method, Shapes::ShapeRef.new(shape: EncryptionMethod, required: true, location_name: "EncryptionMethod")) Encryption.add_member(:key_rotation_interval_seconds, Shapes::ShapeRef.new(shape: EncryptionKeyRotationIntervalSecondsInteger, location_name: "KeyRotationIntervalSeconds")) @@ -415,6 +482,29 @@ module ClientApi GetDashManifests.member = Shapes::ShapeRef.new(shape: GetDashManifestConfiguration) + GetHarvestJobRequest.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelGroupName")) + GetHarvestJobRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelName")) + 
GetHarvestJobRequest.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "OriginEndpointName")) + GetHarvestJobRequest.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "HarvestJobName")) + GetHarvestJobRequest.struct_class = Types::GetHarvestJobRequest + + GetHarvestJobResponse.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ChannelGroupName")) + GetHarvestJobResponse.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ChannelName")) + GetHarvestJobResponse.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "OriginEndpointName")) + GetHarvestJobResponse.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination")) + GetHarvestJobResponse.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "HarvestJobName")) + GetHarvestJobResponse.add_member(:harvested_manifests, Shapes::ShapeRef.new(shape: HarvestedManifests, required: true, location_name: "HarvestedManifests")) + GetHarvestJobResponse.add_member(:description, Shapes::ShapeRef.new(shape: ResourceDescription, location_name: "Description")) + GetHarvestJobResponse.add_member(:schedule_configuration, Shapes::ShapeRef.new(shape: HarvesterScheduleConfiguration, required: true, location_name: "ScheduleConfiguration")) + GetHarvestJobResponse.add_member(:arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "Arn")) + GetHarvestJobResponse.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "CreatedAt")) + GetHarvestJobResponse.add_member(:modified_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "ModifiedAt")) + GetHarvestJobResponse.add_member(:status, Shapes::ShapeRef.new(shape: HarvestJobStatus, required: true, location_name: "Status")) + GetHarvestJobResponse.add_member(:error_message, Shapes::ShapeRef.new(shape: String, location_name: "ErrorMessage")) + GetHarvestJobResponse.add_member(:etag, Shapes::ShapeRef.new(shape: EntityTag, location_name: "ETag")) + GetHarvestJobResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "Tags")) + GetHarvestJobResponse.struct_class = Types::GetHarvestJobResponse + GetHlsManifestConfiguration.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ManifestName")) GetHlsManifestConfiguration.add_member(:url, Shapes::ShapeRef.new(shape: String, required: true, location_name: "Url")) GetHlsManifestConfiguration.add_member(:child_manifest_name, Shapes::ShapeRef.new(shape: ResourceName, location_name: "ChildManifestName")) @@ -473,6 +563,48 @@ module ClientApi GetOriginEndpointResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "Tags")) GetOriginEndpointResponse.struct_class = Types::GetOriginEndpointResponse + HarvestJob.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ChannelGroupName")) + HarvestJob.add_member(:channel_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ChannelName")) + HarvestJob.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "OriginEndpointName")) + 
HarvestJob.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination")) + HarvestJob.add_member(:harvest_job_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "HarvestJobName")) + HarvestJob.add_member(:harvested_manifests, Shapes::ShapeRef.new(shape: HarvestedManifests, required: true, location_name: "HarvestedManifests")) + HarvestJob.add_member(:description, Shapes::ShapeRef.new(shape: ResourceDescription, location_name: "Description")) + HarvestJob.add_member(:schedule_configuration, Shapes::ShapeRef.new(shape: HarvesterScheduleConfiguration, required: true, location_name: "ScheduleConfiguration")) + HarvestJob.add_member(:arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "Arn")) + HarvestJob.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "CreatedAt")) + HarvestJob.add_member(:modified_at, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "ModifiedAt")) + HarvestJob.add_member(:status, Shapes::ShapeRef.new(shape: HarvestJobStatus, required: true, location_name: "Status")) + HarvestJob.add_member(:error_message, Shapes::ShapeRef.new(shape: String, location_name: "ErrorMessage")) + HarvestJob.add_member(:etag, Shapes::ShapeRef.new(shape: EntityTag, location_name: "ETag")) + HarvestJob.struct_class = Types::HarvestJob + + HarvestJobsList.member = Shapes::ShapeRef.new(shape: HarvestJob) + + HarvestedDashManifest.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ManifestName")) + HarvestedDashManifest.struct_class = Types::HarvestedDashManifest + + HarvestedDashManifestsList.member = Shapes::ShapeRef.new(shape: HarvestedDashManifest) + + HarvestedHlsManifest.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ManifestName")) + HarvestedHlsManifest.struct_class = Types::HarvestedHlsManifest + + HarvestedHlsManifestsList.member = Shapes::ShapeRef.new(shape: HarvestedHlsManifest) + + HarvestedLowLatencyHlsManifest.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ManifestName")) + HarvestedLowLatencyHlsManifest.struct_class = Types::HarvestedLowLatencyHlsManifest + + HarvestedLowLatencyHlsManifestsList.member = Shapes::ShapeRef.new(shape: HarvestedLowLatencyHlsManifest) + + HarvestedManifests.add_member(:hls_manifests, Shapes::ShapeRef.new(shape: HarvestedHlsManifestsList, location_name: "HlsManifests")) + HarvestedManifests.add_member(:dash_manifests, Shapes::ShapeRef.new(shape: HarvestedDashManifestsList, location_name: "DashManifests")) + HarvestedManifests.add_member(:low_latency_hls_manifests, Shapes::ShapeRef.new(shape: HarvestedLowLatencyHlsManifestsList, location_name: "LowLatencyHlsManifests")) + HarvestedManifests.struct_class = Types::HarvestedManifests + + HarvesterScheduleConfiguration.add_member(:start_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "StartTime")) + HarvesterScheduleConfiguration.add_member(:end_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "EndTime")) + HarvesterScheduleConfiguration.struct_class = Types::HarvesterScheduleConfiguration + IngestEndpoint.add_member(:id, Shapes::ShapeRef.new(shape: String, location_name: "Id")) IngestEndpoint.add_member(:url, Shapes::ShapeRef.new(shape: String, location_name: "Url")) IngestEndpoint.struct_class = Types::IngestEndpoint @@ -505,6 +637,18 @@ 
module ClientApi ListDashManifests.member = Shapes::ShapeRef.new(shape: ListDashManifestConfiguration) + ListHarvestJobsRequest.add_member(:channel_group_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location: "uri", location_name: "ChannelGroupName")) + ListHarvestJobsRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ListHarvestJobsRequestChannelNameString, location: "querystring", location_name: "channelName")) + ListHarvestJobsRequest.add_member(:origin_endpoint_name, Shapes::ShapeRef.new(shape: ListHarvestJobsRequestOriginEndpointNameString, location: "querystring", location_name: "originEndpointName")) + ListHarvestJobsRequest.add_member(:status, Shapes::ShapeRef.new(shape: HarvestJobStatus, location: "querystring", location_name: "includeStatus")) + ListHarvestJobsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListHarvestJobsRequestMaxResultsInteger, location: "querystring", location_name: "maxResults")) + ListHarvestJobsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location: "querystring", location_name: "nextToken")) + ListHarvestJobsRequest.struct_class = Types::ListHarvestJobsRequest + + ListHarvestJobsResponse.add_member(:items, Shapes::ShapeRef.new(shape: HarvestJobsList, location_name: "Items")) + ListHarvestJobsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "NextToken")) + ListHarvestJobsResponse.struct_class = Types::ListHarvestJobsResponse + ListHlsManifestConfiguration.add_member(:manifest_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "ManifestName")) ListHlsManifestConfiguration.add_member(:child_manifest_name, Shapes::ShapeRef.new(shape: ResourceName, location_name: "ChildManifestName")) ListHlsManifestConfiguration.add_member(:url, Shapes::ShapeRef.new(shape: String, location_name: "Url")) @@ -570,6 +714,10 @@ module ClientApi ResourceNotFoundException.add_member(:resource_type_not_found, Shapes::ShapeRef.new(shape: ResourceTypeNotFound, location_name: "ResourceTypeNotFound")) ResourceNotFoundException.struct_class = Types::ResourceNotFoundException + S3DestinationConfig.add_member(:bucket_name, Shapes::ShapeRef.new(shape: S3BucketName, required: true, location_name: "BucketName")) + S3DestinationConfig.add_member(:destination_path, Shapes::ShapeRef.new(shape: S3DestinationPath, required: true, location_name: "DestinationPath")) + S3DestinationConfig.struct_class = Types::S3DestinationConfig + Scte.add_member(:scte_filter, Shapes::ShapeRef.new(shape: ScteFilterList, location_name: "ScteFilter")) Scte.struct_class = Types::Scte @@ -699,6 +847,7 @@ module ClientApi api.metadata = { "apiVersion" => "2022-12-25", + "auth" => ["aws.auth#sigv4"], "endpointPrefix" => "mediapackagev2", "protocol" => "rest-json", "protocols" => ["rest-json"], @@ -710,6 +859,20 @@ module ClientApi "uid" => "mediapackagev2-2022-12-25", } + api.add_operation(:cancel_harvest_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "CancelHarvestJob" + o.http_method = "PUT" + o.http_request_uri = "/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}" + o.input = Shapes::ShapeRef.new(shape: CancelHarvestJobRequest) + o.output = Shapes::ShapeRef.new(shape: CancelHarvestJobResponse) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: ConflictException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << 
Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: ValidationException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + api.add_operation(:create_channel, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateChannel" o.http_method = "POST" @@ -740,6 +903,21 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException) end) + api.add_operation(:create_harvest_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateHarvestJob" + o.http_method = "POST" + o.http_request_uri = "/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob" + o.input = Shapes::ShapeRef.new(shape: CreateHarvestJobRequest) + o.output = Shapes::ShapeRef.new(shape: CreateHarvestJobResponse) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: ConflictException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: ValidationException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException) + end) + api.add_operation(:create_origin_endpoint, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateOriginEndpoint" o.http_method = "POST" @@ -858,6 +1036,19 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) end) + api.add_operation(:get_harvest_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetHarvestJob" + o.http_method = "GET" + o.http_request_uri = "/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}" + o.input = Shapes::ShapeRef.new(shape: GetHarvestJobRequest) + o.output = Shapes::ShapeRef.new(shape: GetHarvestJobResponse) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: ValidationException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + api.add_operation(:get_origin_endpoint, Seahorse::Model::Operation.new.tap do |o| o.name = "GetOriginEndpoint" o.http_method = "GET" @@ -921,6 +1112,25 @@ module ClientApi ) end) + api.add_operation(:list_harvest_jobs, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListHarvestJobs" + o.http_method = "GET" + o.http_request_uri = "/channelGroup/{ChannelGroupName}/harvestJob" + o.input = Shapes::ShapeRef.new(shape: ListHarvestJobsRequest) + o.output = Shapes::ShapeRef.new(shape: ListHarvestJobsResponse) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: ValidationException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + api.add_operation(:list_origin_endpoints, Seahorse::Model::Operation.new.tap do |o| o.name = "ListOriginEndpoints" o.http_method = "GET" diff --git a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/types.rb b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/types.rb index 9ca3a4f99c4..660e71ff413 
100644 --- a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/types.rb +++ b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/types.rb @@ -26,6 +26,47 @@ class AccessDeniedException < Struct.new( include Aws::Structure end + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel from which the + # harvest job is running. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel from which the harvest job is running. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint that the harvest job is harvesting + # from. This cannot be changed after the harvest job is submitted. + # @return [String] + # + # @!attribute [rw] harvest_job_name + # The name of the harvest job to cancel. This name must be unique + # within the channel and cannot be changed after the harvest job is + # submitted. + # @return [String] + # + # @!attribute [rw] etag + # The current Entity Tag (ETag) associated with the harvest job. Used + # for concurrency control. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CancelHarvestJobRequest AWS API Documentation + # + class CancelHarvestJobRequest < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :harvest_job_name, + :etag) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CancelHarvestJobResponse AWS API Documentation + # + class CancelHarvestJobResponse < Aws::EmptyStructure; end + # The configuration of the channel group. # # @!attribute [rw] channel_group_name @@ -463,6 +504,161 @@ class CreateDashManifestConfiguration < Struct.new( include Aws::Structure end + # The request object for creating a new harvest job. + # + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel from which to + # harvest content. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel from which to harvest content. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint from which to harvest content. + # @return [String] + # + # @!attribute [rw] description + # An optional description for the harvest job. + # @return [String] + # + # @!attribute [rw] harvested_manifests + # A list of manifests to be harvested. + # @return [Types::HarvestedManifests] + # + # @!attribute [rw] schedule_configuration + # The configuration for when the harvest job should run, including + # start and end times. + # @return [Types::HarvesterScheduleConfiguration] + # + # @!attribute [rw] destination + # The S3 destination where the harvested content will be placed. + # @return [Types::Destination] + # + # @!attribute [rw] client_token + # A unique, case-sensitive identifier that you provide to ensure the + # idempotency of the request. + # + # **A suitable default value is auto-generated.** You should normally + # not need to pass this option. + # @return [String] + # + # @!attribute [rw] harvest_job_name + # A name for the harvest job. This name must be unique within the + # channel. + # @return [String] + # + # @!attribute [rw] tags + # A collection of tags associated with the harvest job. 
+ # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CreateHarvestJobRequest AWS API Documentation + # + class CreateHarvestJobRequest < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :description, + :harvested_manifests, + :schedule_configuration, + :destination, + :client_token, + :harvest_job_name, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # The response object returned after creating a harvest job. + # + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel from which + # content is being harvested. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel from which content is being harvested. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint from which content is being + # harvested. + # @return [String] + # + # @!attribute [rw] destination + # The S3 destination where the harvested content will be placed. + # @return [Types::Destination] + # + # @!attribute [rw] harvest_job_name + # The name of the created harvest job. + # @return [String] + # + # @!attribute [rw] harvested_manifests + # A list of manifests that will be harvested. + # @return [Types::HarvestedManifests] + # + # @!attribute [rw] description + # The description of the harvest job, if provided. + # @return [String] + # + # @!attribute [rw] schedule_configuration + # The configuration for when the harvest job will run, including start + # and end times. + # @return [Types::HarvesterScheduleConfiguration] + # + # @!attribute [rw] arn + # The Amazon Resource Name (ARN) of the created harvest job. + # @return [String] + # + # @!attribute [rw] created_at + # The date and time the harvest job was created. + # @return [Time] + # + # @!attribute [rw] modified_at + # The date and time the harvest job was last modified. + # @return [Time] + # + # @!attribute [rw] status + # The current status of the harvest job (e.g., CREATED, IN\_PROGRESS, + # ABORTED, COMPLETED, FAILED). + # @return [String] + # + # @!attribute [rw] error_message + # An error message if the harvest job creation failed. + # @return [String] + # + # @!attribute [rw] etag + # The current version of the harvest job. Used for concurrency + # control. + # @return [String] + # + # @!attribute [rw] tags + # A collection of tags associated with the harvest job. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/CreateHarvestJobResponse AWS API Documentation + # + class CreateHarvestJobResponse < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :destination, + :harvest_job_name, + :harvested_manifests, + :description, + :schedule_configuration, + :arn, + :created_at, + :modified_at, + :status, + :error_message, + :etag, + :tags) + SENSITIVE = [] + include Aws::Structure + end + # Create an HTTP live streaming (HLS) manifest configuration. # # @!attribute [rw] manifest_name @@ -951,6 +1147,23 @@ class DeleteOriginEndpointRequest < Struct.new( # class DeleteOriginEndpointResponse < Aws::EmptyStructure; end + # The configuration for the destination where the harvested content will + # be exported. + # + # @!attribute [rw] s3_destination + # The configuration for exporting harvested content to an S3 bucket. + # This includes details such as the bucket name and destination path + # within the bucket. 
+ # @return [Types::S3DestinationConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/Destination AWS API Documentation + # + class Destination < Struct.new( + :s3_destination) + SENSITIVE = [] + include Aws::Structure + end + # The parameters for encrypting content. # # @!attribute [rw] constant_initialization_vector @@ -1478,6 +1691,125 @@ class GetDashManifestConfiguration < Struct.new( include Aws::Structure end + # The request object for retrieving a specific harvest job. + # + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel associated with + # the harvest job. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel associated with the harvest job. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint associated with the harvest job. + # @return [String] + # + # @!attribute [rw] harvest_job_name + # The name of the harvest job to retrieve. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/GetHarvestJobRequest AWS API Documentation + # + class GetHarvestJobRequest < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :harvest_job_name) + SENSITIVE = [] + include Aws::Structure + end + + # The response object containing the details of the requested harvest + # job. + # + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel associated with + # the harvest job. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel associated with the harvest job. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint associated with the harvest job. + # @return [String] + # + # @!attribute [rw] destination + # The S3 destination where the harvested content is being placed. + # @return [Types::Destination] + # + # @!attribute [rw] harvest_job_name + # The name of the harvest job. + # @return [String] + # + # @!attribute [rw] harvested_manifests + # A list of manifests that are being or have been harvested. + # @return [Types::HarvestedManifests] + # + # @!attribute [rw] description + # The description of the harvest job, if provided. + # @return [String] + # + # @!attribute [rw] schedule_configuration + # The configuration for when the harvest job is scheduled to run, + # including start and end times. + # @return [Types::HarvesterScheduleConfiguration] + # + # @!attribute [rw] arn + # The Amazon Resource Name (ARN) of the harvest job. + # @return [String] + # + # @!attribute [rw] created_at + # The date and time when the harvest job was created. + # @return [Time] + # + # @!attribute [rw] modified_at + # The date and time when the harvest job was last modified. + # @return [Time] + # + # @!attribute [rw] status + # The current status of the harvest job (e.g., QUEUED, IN\_PROGRESS, + # CANCELLED, COMPLETED, FAILED). + # @return [String] + # + # @!attribute [rw] error_message + # An error message if the harvest job encountered any issues. + # @return [String] + # + # @!attribute [rw] etag + # The current version of the harvest job. Used for concurrency + # control. + # @return [String] + # + # @!attribute [rw] tags + # A collection of tags associated with the harvest job. 
+ # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/GetHarvestJobResponse AWS API Documentation + # + class GetHarvestJobResponse < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :destination, + :harvest_job_name, + :harvested_manifests, + :description, + :schedule_configuration, + :arn, + :created_at, + :modified_at, + :status, + :error_message, + :etag, + :tags) + SENSITIVE = [] + include Aws::Structure + end + # Retrieve the HTTP live streaming (HLS) manifest configuration. # # @!attribute [rw] manifest_name @@ -1815,6 +2147,174 @@ class GetOriginEndpointResponse < Struct.new( include Aws::Structure end + # Represents a harvest job resource in MediaPackage v2, which is used to + # export content from an origin endpoint to an S3 bucket. + # + # @!attribute [rw] channel_group_name + # The name of the channel group containing the channel associated with + # this harvest job. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel associated with this harvest job. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint associated with this harvest job. + # @return [String] + # + # @!attribute [rw] destination + # The S3 destination where the harvested content will be placed. + # @return [Types::Destination] + # + # @!attribute [rw] harvest_job_name + # The name of the harvest job. + # @return [String] + # + # @!attribute [rw] harvested_manifests + # A list of manifests that are being or have been harvested. + # @return [Types::HarvestedManifests] + # + # @!attribute [rw] description + # An optional description of the harvest job. + # @return [String] + # + # @!attribute [rw] schedule_configuration + # The configuration for when the harvest job is scheduled to run. + # @return [Types::HarvesterScheduleConfiguration] + # + # @!attribute [rw] arn + # The Amazon Resource Name (ARN) of the harvest job. + # @return [String] + # + # @!attribute [rw] created_at + # The date and time when the harvest job was created. + # @return [Time] + # + # @!attribute [rw] modified_at + # The date and time when the harvest job was last modified. + # @return [Time] + # + # @!attribute [rw] status + # The current status of the harvest job (e.g., QUEUED, IN\_PROGRESS, + # CANCELLED, COMPLETED, FAILED). + # @return [String] + # + # @!attribute [rw] error_message + # An error message if the harvest job encountered any issues. + # @return [String] + # + # @!attribute [rw] etag + # The current version of the harvest job. Used for concurrency + # control. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvestJob AWS API Documentation + # + class HarvestJob < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :destination, + :harvest_job_name, + :harvested_manifests, + :description, + :schedule_configuration, + :arn, + :created_at, + :modified_at, + :status, + :error_message, + :etag) + SENSITIVE = [] + include Aws::Structure + end + + # Information about a harvested DASH manifest. + # + # @!attribute [rw] manifest_name + # The name of the harvested DASH manifest. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvestedDashManifest AWS API Documentation + # + class HarvestedDashManifest < Struct.new( + :manifest_name) + SENSITIVE = [] + include Aws::Structure + end + + # Information about a harvested HLS manifest. 
+ # + # @!attribute [rw] manifest_name + # The name of the harvested HLS manifest. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvestedHlsManifest AWS API Documentation + # + class HarvestedHlsManifest < Struct.new( + :manifest_name) + SENSITIVE = [] + include Aws::Structure + end + + # Information about a harvested Low-Latency HLS manifest. + # + # @!attribute [rw] manifest_name + # The name of the harvested Low-Latency HLS manifest. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvestedLowLatencyHlsManifest AWS API Documentation + # + class HarvestedLowLatencyHlsManifest < Struct.new( + :manifest_name) + SENSITIVE = [] + include Aws::Structure + end + + # A collection of harvested manifests of different types. + # + # @!attribute [rw] hls_manifests + # A list of harvested HLS manifests. + # @return [Array] + # + # @!attribute [rw] dash_manifests + # A list of harvested DASH manifests. + # @return [Array] + # + # @!attribute [rw] low_latency_hls_manifests + # A list of harvested Low-Latency HLS manifests. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvestedManifests AWS API Documentation + # + class HarvestedManifests < Struct.new( + :hls_manifests, + :dash_manifests, + :low_latency_hls_manifests) + SENSITIVE = [] + include Aws::Structure + end + + # Defines the schedule configuration for a harvest job. + # + # @!attribute [rw] start_time + # The start time for the harvest job. + # @return [Time] + # + # @!attribute [rw] end_time + # The end time for the harvest job. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/HarvesterScheduleConfiguration AWS API Documentation + # + class HarvesterScheduleConfiguration < Struct.new( + :start_time, + :end_time) + SENSITIVE = [] + include Aws::Structure + end + # The ingest domain URL where the source stream should be sent. # # @!attribute [rw] id @@ -1947,6 +2447,75 @@ class ListDashManifestConfiguration < Struct.new( include Aws::Structure end + # The request object for listing harvest jobs. + # + # @!attribute [rw] channel_group_name + # The name of the channel group to filter the harvest jobs by. If + # specified, only harvest jobs associated with channels in this group + # will be returned. + # @return [String] + # + # @!attribute [rw] channel_name + # The name of the channel to filter the harvest jobs by. If specified, + # only harvest jobs associated with this channel will be returned. + # @return [String] + # + # @!attribute [rw] origin_endpoint_name + # The name of the origin endpoint to filter the harvest jobs by. If + # specified, only harvest jobs associated with this origin endpoint + # will be returned. + # @return [String] + # + # @!attribute [rw] status + # The status to filter the harvest jobs by. If specified, only harvest + # jobs with this status will be returned. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of harvest jobs to return in a single request. If + # not specified, a default value will be used. + # @return [Integer] + # + # @!attribute [rw] next_token + # A token used for pagination. Provide this value in subsequent + # requests to retrieve the next set of results. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/ListHarvestJobsRequest AWS API Documentation + # + class ListHarvestJobsRequest < Struct.new( + :channel_group_name, + :channel_name, + :origin_endpoint_name, + :status, + :max_results, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # The response object containing the list of harvest jobs that match the + # specified criteria. + # + # @!attribute [rw] items + # An array of harvest job objects that match the specified criteria. + # @return [Array] + # + # @!attribute [rw] next_token + # A token used for pagination. Include this value in subsequent + # requests to retrieve the next set of results. If null, there are no + # more results to retrieve. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/ListHarvestJobsResponse AWS API Documentation + # + class ListHarvestJobsResponse < Struct.new( + :items, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + # List the HTTP live streaming (HLS) manifest configuration. # # @!attribute [rw] manifest_name @@ -2255,6 +2824,28 @@ class ResourceNotFoundException < Struct.new( include Aws::Structure end + # Configuration parameters for where in an S3 bucket to place the + # harvested content. + # + # @!attribute [rw] bucket_name + # The name of an S3 bucket within which harvested content will be + # exported. + # @return [String] + # + # @!attribute [rw] destination_path + # The path within the specified S3 bucket where the harvested content + # will be placed. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/mediapackagev2-2022-12-25/S3DestinationConfig AWS API Documentation + # + class S3DestinationConfig < Struct.new( + :bucket_name, + :destination_path) + SENSITIVE = [] + include Aws::Structure + end + # The SCTE configuration. # # @!attribute [rw] scte_filter diff --git a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/waiters.rb b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/waiters.rb index bd45b29783f..5081f805bf4 100644 --- a/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/waiters.rb +++ b/gems/aws-sdk-mediapackagev2/lib/aws-sdk-mediapackagev2/waiters.rb @@ -10,6 +10,129 @@ require 'aws-sdk-core/waiters' module Aws::MediaPackageV2 + # Waiters are utility methods that poll for a particular state to occur + # on a client. Waiters can fail after a number of attempts at a polling + # interval defined for the service client. + # + # For a list of operations that can be waited for and the + # client methods called for each operation, see the table below or the + # {Client#wait_until} field documentation for the {Client}. + # + # # Invoking a Waiter + # To invoke a waiter, call #wait_until on a {Client}. The first parameter + # is the waiter name, which is specific to the service client and indicates + # which operation is being waited for. The second parameter is a hash of + # parameters that are passed to the client method called by the waiter, + # which varies according to the waiter name. + # + # # Wait Failures + # To catch errors in a waiter, use WaiterFailed, + # as shown in the following example. + # + # rescue rescue Aws::Waiters::Errors::WaiterFailed => error + # puts "failed waiting for instance running: #{error.message} + # end + # + # # Configuring a Waiter + # Each waiter has a default polling interval and a maximum number of + # attempts it will make before returning control to your program. 
+ # To set these values, use the `max_attempts` and `delay` parameters + # in your `#wait_until` call. + # The following example waits for up to 25 seconds, polling every five seconds. + # + # client.wait_until(...) do |w| + # w.max_attempts = 5 + # w.delay = 5 + # end + # + # To disable wait failures, set the value of either of these parameters + # to `nil`. + # + # # Extending a Waiter + # To modify the behavior of waiters, you can register callbacks that are + # triggered before each polling attempt and before waiting. + # + # The following example implements an exponential backoff in a waiter + # by doubling the amount of time to wait on every attempt. + # + # client.wait_until(...) do |w| + # w.interval = 0 # disable normal sleep + # w.before_wait do |n, resp| + # sleep(n ** 2) + # end + # end + # + # # Available Waiters + # + # The following table lists the valid waiter names, the operations they call, + # and the default `:delay` and `:max_attempts` values. + # + # | waiter_name | params | :delay | :max_attempts | + # | -------------------- | ------------------------ | -------- | ------------- | + # | harvest_job_finished | {Client#get_harvest_job} | 2 | 60 | + # module Waiters + + class HarvestJobFinished + + # @param [Hash] options + # @option options [required, Client] :client + # @option options [Integer] :max_attempts (60) + # @option options [Integer] :delay (2) + # @option options [Proc] :before_attempt + # @option options [Proc] :before_wait + def initialize(options) + @client = options.fetch(:client) + @waiter = Aws::Waiters::Waiter.new({ + max_attempts: 60, + delay: 2, + poller: Aws::Waiters::Poller.new( + operation_name: :get_harvest_job, + acceptors: [ + { + "matcher" => "path", + "argument" => "status", + "state" => "success", + "expected" => "COMPLETED" + }, + { + "matcher" => "path", + "argument" => "status", + "state" => "success", + "expected" => "CANCELLED" + }, + { + "matcher" => "path", + "argument" => "status", + "state" => "failure", + "expected" => "FAILED" + }, + { + "matcher" => "path", + "argument" => "status", + "state" => "retry", + "expected" => "QUEUED" + }, + { + "matcher" => "path", + "argument" => "status", + "state" => "retry", + "expected" => "IN_PROGRESS" + } + ] + ) + }.merge(options)) + end + + # @option (see Client#get_harvest_job) + # @return (see Client#get_harvest_job) + def wait(params = {}) + @waiter.wait(client: @client, params: params) + end + + # @api private + attr_reader :waiter + + end end end diff --git a/gems/aws-sdk-mediapackagev2/sig/client.rbs b/gems/aws-sdk-mediapackagev2/sig/client.rbs index e3e5bb342fa..51f17cb747d 100644 --- a/gems/aws-sdk-mediapackagev2/sig/client.rbs +++ b/gems/aws-sdk-mediapackagev2/sig/client.rbs @@ -75,6 +75,19 @@ module Aws | (?Hash[Symbol, untyped]) -> instance + interface _CancelHarvestJobResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::CancelHarvestJobResponse] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaPackageV2/Client.html#cancel_harvest_job-instance_method + def cancel_harvest_job: ( + channel_group_name: ::String, + channel_name: ::String, + origin_endpoint_name: ::String, + harvest_job_name: ::String, + ?etag: ::String + ) -> _CancelHarvestJobResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CancelHarvestJobResponseSuccess + interface _CreateChannelResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateChannelResponse] def arn: () -> ::String @@ -119,6 +132,63 @@ module Aws ) -> 
_CreateChannelGroupResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateChannelGroupResponseSuccess + interface _CreateHarvestJobResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::CreateHarvestJobResponse] + def channel_group_name: () -> ::String + def channel_name: () -> ::String + def origin_endpoint_name: () -> ::String + def destination: () -> Types::Destination + def harvest_job_name: () -> ::String + def harvested_manifests: () -> Types::HarvestedManifests + def description: () -> ::String + def schedule_configuration: () -> Types::HarvesterScheduleConfiguration + def arn: () -> ::String + def created_at: () -> ::Time + def modified_at: () -> ::Time + def status: () -> ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + def error_message: () -> ::String + def etag: () -> ::String + def tags: () -> ::Hash[::String, ::String] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaPackageV2/Client.html#create_harvest_job-instance_method + def create_harvest_job: ( + channel_group_name: ::String, + channel_name: ::String, + origin_endpoint_name: ::String, + ?description: ::String, + harvested_manifests: { + hls_manifests: Array[ + { + manifest_name: ::String + }, + ]?, + dash_manifests: Array[ + { + manifest_name: ::String + }, + ]?, + low_latency_hls_manifests: Array[ + { + manifest_name: ::String + }, + ]? + }, + schedule_configuration: { + start_time: ::Time, + end_time: ::Time + }, + destination: { + s3_destination: { + bucket_name: ::String, + destination_path: ::String + } + }, + ?client_token: ::String, + ?harvest_job_name: ::String, + ?tags: Hash[::String, ::String] + ) -> _CreateHarvestJobResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateHarvestJobResponseSuccess + interface _CreateOriginEndpointResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateOriginEndpointResponse] def arn: () -> ::String @@ -353,6 +423,33 @@ module Aws ) -> _GetChannelPolicyResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetChannelPolicyResponseSuccess + interface _GetHarvestJobResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::GetHarvestJobResponse] + def channel_group_name: () -> ::String + def channel_name: () -> ::String + def origin_endpoint_name: () -> ::String + def destination: () -> Types::Destination + def harvest_job_name: () -> ::String + def harvested_manifests: () -> Types::HarvestedManifests + def description: () -> ::String + def schedule_configuration: () -> Types::HarvesterScheduleConfiguration + def arn: () -> ::String + def created_at: () -> ::Time + def modified_at: () -> ::Time + def status: () -> ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + def error_message: () -> ::String + def etag: () -> ::String + def tags: () -> ::Hash[::String, ::String] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaPackageV2/Client.html#get_harvest_job-instance_method + def get_harvest_job: ( + channel_group_name: ::String, + channel_name: ::String, + origin_endpoint_name: ::String, + harvest_job_name: ::String + ) -> _GetHarvestJobResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetHarvestJobResponseSuccess + interface _GetOriginEndpointResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetOriginEndpointResponse] def arn: () -> ::String @@ -420,6 +517,22 @@ module Aws ) -> _ListChannelsResponseSuccess | 
(Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListChannelsResponseSuccess + interface _ListHarvestJobsResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::ListHarvestJobsResponse] + def items: () -> ::Array[Types::HarvestJob] + def next_token: () -> ::String + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaPackageV2/Client.html#list_harvest_jobs-instance_method + def list_harvest_jobs: ( + channel_group_name: ::String, + ?channel_name: ::String, + ?origin_endpoint_name: ::String, + ?status: ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED"), + ?max_results: ::Integer, + ?next_token: ::String + ) -> _ListHarvestJobsResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListHarvestJobsResponseSuccess + interface _ListOriginEndpointsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListOriginEndpointsResponse] def items: () -> ::Array[Types::OriginEndpointListConfiguration] @@ -653,6 +766,15 @@ module Aws ?etag: ::String ) -> _UpdateOriginEndpointResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateOriginEndpointResponseSuccess + + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaPackageV2/Client.html#wait_until-instance_method + def wait_until: (:harvest_job_finished waiter_name, + channel_group_name: ::String, + channel_name: ::String, + origin_endpoint_name: ::String, + harvest_job_name: ::String + ) -> Client::_GetHarvestJobResponseSuccess + | (:harvest_job_finished waiter_name, Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> Client::_GetHarvestJobResponseSuccess end end end diff --git a/gems/aws-sdk-mediapackagev2/sig/types.rbs b/gems/aws-sdk-mediapackagev2/sig/types.rbs index 9e85251d79b..c6e150bd47d 100644 --- a/gems/aws-sdk-mediapackagev2/sig/types.rbs +++ b/gems/aws-sdk-mediapackagev2/sig/types.rbs @@ -13,6 +13,18 @@ module Aws::MediaPackageV2 SENSITIVE: [] end + class CancelHarvestJobRequest + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor harvest_job_name: ::String + attr_accessor etag: ::String + SENSITIVE: [] + end + + class CancelHarvestJobResponse < Aws::EmptyStructure + end + class ChannelGroupListConfiguration attr_accessor channel_group_name: ::String attr_accessor arn: ::String @@ -98,6 +110,39 @@ module Aws::MediaPackageV2 SENSITIVE: [] end + class CreateHarvestJobRequest + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor description: ::String + attr_accessor harvested_manifests: Types::HarvestedManifests + attr_accessor schedule_configuration: Types::HarvesterScheduleConfiguration + attr_accessor destination: Types::Destination + attr_accessor client_token: ::String + attr_accessor harvest_job_name: ::String + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + + class CreateHarvestJobResponse + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor destination: Types::Destination + attr_accessor harvest_job_name: ::String + attr_accessor harvested_manifests: Types::HarvestedManifests + attr_accessor description: ::String + attr_accessor schedule_configuration: Types::HarvesterScheduleConfiguration + attr_accessor arn: ::String + attr_accessor created_at: ::Time + attr_accessor modified_at: 
::Time + attr_accessor status: ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + attr_accessor error_message: ::String + attr_accessor etag: ::String + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + class CreateHlsManifestConfiguration attr_accessor manifest_name: ::String attr_accessor child_manifest_name: ::String @@ -209,6 +254,11 @@ module Aws::MediaPackageV2 class DeleteOriginEndpointResponse < Aws::EmptyStructure end + class Destination + attr_accessor s3_destination: Types::S3DestinationConfig + SENSITIVE: [] + end + class Encryption attr_accessor constant_initialization_vector: ::String attr_accessor encryption_method: Types::EncryptionMethod @@ -309,6 +359,33 @@ module Aws::MediaPackageV2 SENSITIVE: [] end + class GetHarvestJobRequest + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor harvest_job_name: ::String + SENSITIVE: [] + end + + class GetHarvestJobResponse + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor destination: Types::Destination + attr_accessor harvest_job_name: ::String + attr_accessor harvested_manifests: Types::HarvestedManifests + attr_accessor description: ::String + attr_accessor schedule_configuration: Types::HarvesterScheduleConfiguration + attr_accessor arn: ::String + attr_accessor created_at: ::Time + attr_accessor modified_at: ::Time + attr_accessor status: ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + attr_accessor error_message: ::String + attr_accessor etag: ::String + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + class GetHlsManifestConfiguration attr_accessor manifest_name: ::String attr_accessor url: ::String @@ -375,6 +452,52 @@ module Aws::MediaPackageV2 SENSITIVE: [] end + class HarvestJob + attr_accessor channel_group_name: ::String + attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor destination: Types::Destination + attr_accessor harvest_job_name: ::String + attr_accessor harvested_manifests: Types::HarvestedManifests + attr_accessor description: ::String + attr_accessor schedule_configuration: Types::HarvesterScheduleConfiguration + attr_accessor arn: ::String + attr_accessor created_at: ::Time + attr_accessor modified_at: ::Time + attr_accessor status: ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + attr_accessor error_message: ::String + attr_accessor etag: ::String + SENSITIVE: [] + end + + class HarvestedDashManifest + attr_accessor manifest_name: ::String + SENSITIVE: [] + end + + class HarvestedHlsManifest + attr_accessor manifest_name: ::String + SENSITIVE: [] + end + + class HarvestedLowLatencyHlsManifest + attr_accessor manifest_name: ::String + SENSITIVE: [] + end + + class HarvestedManifests + attr_accessor hls_manifests: ::Array[Types::HarvestedHlsManifest] + attr_accessor dash_manifests: ::Array[Types::HarvestedDashManifest] + attr_accessor low_latency_hls_manifests: ::Array[Types::HarvestedLowLatencyHlsManifest] + SENSITIVE: [] + end + + class HarvesterScheduleConfiguration + attr_accessor start_time: ::Time + attr_accessor end_time: ::Time + SENSITIVE: [] + end + class IngestEndpoint attr_accessor id: ::String attr_accessor url: ::String @@ -417,6 +540,22 @@ module Aws::MediaPackageV2 SENSITIVE: [] end + class ListHarvestJobsRequest + attr_accessor channel_group_name: ::String + 
attr_accessor channel_name: ::String + attr_accessor origin_endpoint_name: ::String + attr_accessor status: ("QUEUED" | "IN_PROGRESS" | "CANCELLED" | "COMPLETED" | "FAILED") + attr_accessor max_results: ::Integer + attr_accessor next_token: ::String + SENSITIVE: [] + end + + class ListHarvestJobsResponse + attr_accessor items: ::Array[Types::HarvestJob] + attr_accessor next_token: ::String + SENSITIVE: [] + end + class ListHlsManifestConfiguration attr_accessor manifest_name: ::String attr_accessor child_manifest_name: ::String @@ -494,7 +633,13 @@ module Aws::MediaPackageV2 class ResourceNotFoundException attr_accessor message: ::String - attr_accessor resource_type_not_found: ("CHANNEL_GROUP" | "CHANNEL" | "ORIGIN_ENDPOINT") + attr_accessor resource_type_not_found: ("CHANNEL_GROUP" | "CHANNEL" | "ORIGIN_ENDPOINT" | "HARVEST_JOB") + SENSITIVE: [] + end + + class S3DestinationConfig + attr_accessor bucket_name: ::String + attr_accessor destination_path: ::String SENSITIVE: [] end @@ -640,7 +785,7 @@ module Aws::MediaPackageV2 class ValidationException attr_accessor message: ::String - attr_accessor validation_exception_type: ("CONTAINER_TYPE_IMMUTABLE" | "INVALID_PAGINATION_TOKEN" | "INVALID_PAGINATION_MAX_RESULTS" | "INVALID_POLICY" | "INVALID_ROLE_ARN" | "MANIFEST_NAME_COLLISION" | "ENCRYPTION_METHOD_CONTAINER_TYPE_MISMATCH" | "CENC_IV_INCOMPATIBLE" | "ENCRYPTION_CONTRACT_WITHOUT_AUDIO_RENDITION_INCOMPATIBLE" | "ENCRYPTION_CONTRACT_UNENCRYPTED" | "ENCRYPTION_CONTRACT_SHARED" | "NUM_MANIFESTS_LOW" | "NUM_MANIFESTS_HIGH" | "MANIFEST_DRM_SYSTEMS_INCOMPATIBLE" | "DRM_SYSTEMS_ENCRYPTION_METHOD_INCOMPATIBLE" | "ROLE_ARN_NOT_ASSUMABLE" | "ROLE_ARN_LENGTH_OUT_OF_RANGE" | "ROLE_ARN_INVALID_FORMAT" | "URL_INVALID" | "URL_SCHEME" | "URL_USER_INFO" | "URL_PORT" | "URL_UNKNOWN_HOST" | "URL_LOCAL_ADDRESS" | "URL_LOOPBACK_ADDRESS" | "URL_LINK_LOCAL_ADDRESS" | "URL_MULTICAST_ADDRESS" | "MEMBER_INVALID" | "MEMBER_MISSING" | "MEMBER_MIN_VALUE" | "MEMBER_MAX_VALUE" | "MEMBER_MIN_LENGTH" | "MEMBER_MAX_LENGTH" | "MEMBER_INVALID_ENUM_VALUE" | "MEMBER_DOES_NOT_MATCH_PATTERN" | "INVALID_MANIFEST_FILTER" | "INVALID_TIME_DELAY_SECONDS" | "END_TIME_EARLIER_THAN_START_TIME" | "TS_CONTAINER_TYPE_WITH_DASH_MANIFEST" | "DIRECT_MODE_WITH_TIMING_SOURCE" | "NONE_MODE_WITH_TIMING_SOURCE" | "TIMING_SOURCE_MISSING" | "UPDATE_PERIOD_SMALLER_THAN_SEGMENT_DURATION" | "PERIOD_TRIGGERS_NONE_SPECIFIED_WITH_ADDITIONAL_VALUES" | "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS" | "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION" | "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" | "CLIP_START_TIME_WITH_START_OR_END" | "START_TAG_TIME_OFFSET_INVALID") + attr_accessor validation_exception_type: ("CONTAINER_TYPE_IMMUTABLE" | "INVALID_PAGINATION_TOKEN" | "INVALID_PAGINATION_MAX_RESULTS" | "INVALID_POLICY" | "INVALID_ROLE_ARN" | "MANIFEST_NAME_COLLISION" | "ENCRYPTION_METHOD_CONTAINER_TYPE_MISMATCH" | "CENC_IV_INCOMPATIBLE" | "ENCRYPTION_CONTRACT_WITHOUT_AUDIO_RENDITION_INCOMPATIBLE" | "ENCRYPTION_CONTRACT_UNENCRYPTED" | "ENCRYPTION_CONTRACT_SHARED" | "NUM_MANIFESTS_LOW" | "NUM_MANIFESTS_HIGH" | "MANIFEST_DRM_SYSTEMS_INCOMPATIBLE" | "DRM_SYSTEMS_ENCRYPTION_METHOD_INCOMPATIBLE" | "ROLE_ARN_NOT_ASSUMABLE" | "ROLE_ARN_LENGTH_OUT_OF_RANGE" | "ROLE_ARN_INVALID_FORMAT" | "URL_INVALID" | "URL_SCHEME" | "URL_USER_INFO" | "URL_PORT" | "URL_UNKNOWN_HOST" | "URL_LOCAL_ADDRESS" | "URL_LOOPBACK_ADDRESS" | "URL_LINK_LOCAL_ADDRESS" | "URL_MULTICAST_ADDRESS" | "MEMBER_INVALID" | "MEMBER_MISSING" | "MEMBER_MIN_VALUE" | "MEMBER_MAX_VALUE" | 
"MEMBER_MIN_LENGTH" | "MEMBER_MAX_LENGTH" | "MEMBER_INVALID_ENUM_VALUE" | "MEMBER_DOES_NOT_MATCH_PATTERN" | "INVALID_MANIFEST_FILTER" | "INVALID_TIME_DELAY_SECONDS" | "END_TIME_EARLIER_THAN_START_TIME" | "TS_CONTAINER_TYPE_WITH_DASH_MANIFEST" | "DIRECT_MODE_WITH_TIMING_SOURCE" | "NONE_MODE_WITH_TIMING_SOURCE" | "TIMING_SOURCE_MISSING" | "UPDATE_PERIOD_SMALLER_THAN_SEGMENT_DURATION" | "PERIOD_TRIGGERS_NONE_SPECIFIED_WITH_ADDITIONAL_VALUES" | "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS" | "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION" | "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" | "HARVESTED_MANIFEST_HAS_START_END_FILTER_CONFIGURATION" | "HARVESTED_MANIFEST_NOT_FOUND_ON_ENDPOINT" | "TOO_MANY_IN_PROGRESS_HARVEST_JOBS" | "HARVEST_JOB_INELIGIBLE_FOR_CANCELLATION" | "INVALID_HARVEST_JOB_DURATION" | "HARVEST_JOB_S3_DESTINATION_MISSING_OR_INCOMPLETE" | "HARVEST_JOB_UNABLE_TO_WRITE_TO_S3_DESTINATION" | "HARVEST_JOB_CUSTOMER_ENDPOINT_READ_ACCESS_DENIED" | "CLIP_START_TIME_WITH_START_OR_END" | "START_TAG_TIME_OFFSET_INVALID") SENSITIVE: [] end end diff --git a/gems/aws-sdk-mediapackagev2/sig/waiters.rbs b/gems/aws-sdk-mediapackagev2/sig/waiters.rbs index 8abb2017b16..2c28c46f6f4 100644 --- a/gems/aws-sdk-mediapackagev2/sig/waiters.rbs +++ b/gems/aws-sdk-mediapackagev2/sig/waiters.rbs @@ -8,6 +8,19 @@ module Aws module MediaPackageV2 module Waiters + + class HarvestJobFinished + def initialize: (?client: Client, ?max_attempts: Integer, ?delay: Integer, ?before_attempt: Proc, ?before_wait: Proc) -> void + | (?Hash[Symbol, untyped]) -> void + + def wait: ( + channel_group_name: ::String, + channel_name: ::String, + origin_endpoint_name: ::String, + harvest_job_name: ::String + ) -> Client::_GetHarvestJobResponseSuccess + | (Hash[Symbol, untyped]) -> Client::_GetHarvestJobResponseSuccess + end end end end diff --git a/gems/aws-sdk-opensearchservice/CHANGELOG.md b/gems/aws-sdk-opensearchservice/CHANGELOG.md index 532a83e3cd2..4eebb21995c 100644 --- a/gems/aws-sdk-opensearchservice/CHANGELOG.md +++ b/gems/aws-sdk-opensearchservice/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.58.0 (2024-10-28) +------------------ + +* Feature - Adds support for provisioning dedicated coordinator nodes. Coordinator nodes can be specified using the new NodeOptions parameter in ClusterConfig. 
+ 1.57.0 (2024-10-18) ------------------ diff --git a/gems/aws-sdk-opensearchservice/VERSION b/gems/aws-sdk-opensearchservice/VERSION index 373aea97570..79f82f6b8e0 100644 --- a/gems/aws-sdk-opensearchservice/VERSION +++ b/gems/aws-sdk-opensearchservice/VERSION @@ -1 +1 @@ -1.57.0 +1.58.0 diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb index a5207626af0..6ba1eed9d4b 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-opensearchservice/endpoint_provider' autoload :Endpoints, 'aws-sdk-opensearchservice/endpoints' - GEM_VERSION = '1.57.0' + GEM_VERSION = '1.58.0' end diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb index 9447a677b82..68ee73be25c 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb @@ -907,6 +907,16 @@ def cancel_service_software_update(params = {}, options = {}) # enabled: false, # required # }, # multi_az_with_standby_enabled: false, + # node_options: [ + # { + # node_type: "coordinator", # accepts coordinator + # node_config: { + # enabled: false, + # type: "m3.medium.search", # accepts m3.medium.search, m3.large.search, m3.xlarge.search, m3.2xlarge.search, m4.large.search, m4.xlarge.search, m4.2xlarge.search, m4.4xlarge.search, m4.10xlarge.search, m5.large.search, m5.xlarge.search, m5.2xlarge.search, m5.4xlarge.search, m5.12xlarge.search, m5.24xlarge.search, r5.large.search, r5.xlarge.search, r5.2xlarge.search, r5.4xlarge.search, r5.12xlarge.search, r5.24xlarge.search, c5.large.search, c5.xlarge.search, c5.2xlarge.search, c5.4xlarge.search, c5.9xlarge.search, c5.18xlarge.search, t3.nano.search, t3.micro.search, t3.small.search, t3.medium.search, t3.large.search, t3.xlarge.search, t3.2xlarge.search, or1.medium.search, or1.large.search, or1.xlarge.search, or1.2xlarge.search, or1.4xlarge.search, or1.8xlarge.search, or1.12xlarge.search, or1.16xlarge.search, ultrawarm1.medium.search, ultrawarm1.large.search, ultrawarm1.xlarge.search, t2.micro.search, t2.small.search, t2.medium.search, r3.large.search, r3.xlarge.search, r3.2xlarge.search, r3.4xlarge.search, r3.8xlarge.search, i2.xlarge.search, i2.2xlarge.search, d2.xlarge.search, d2.2xlarge.search, d2.4xlarge.search, d2.8xlarge.search, c4.large.search, c4.xlarge.search, c4.2xlarge.search, c4.4xlarge.search, c4.8xlarge.search, r4.large.search, r4.xlarge.search, r4.2xlarge.search, r4.4xlarge.search, r4.8xlarge.search, r4.16xlarge.search, i3.large.search, i3.xlarge.search, i3.2xlarge.search, i3.4xlarge.search, i3.8xlarge.search, i3.16xlarge.search, r6g.large.search, r6g.xlarge.search, r6g.2xlarge.search, r6g.4xlarge.search, r6g.8xlarge.search, r6g.12xlarge.search, m6g.large.search, m6g.xlarge.search, m6g.2xlarge.search, m6g.4xlarge.search, m6g.8xlarge.search, m6g.12xlarge.search, c6g.large.search, c6g.xlarge.search, c6g.2xlarge.search, c6g.4xlarge.search, c6g.8xlarge.search, c6g.12xlarge.search, r6gd.large.search, r6gd.xlarge.search, r6gd.2xlarge.search, r6gd.4xlarge.search, r6gd.8xlarge.search, r6gd.12xlarge.search, r6gd.16xlarge.search, t4g.small.search, t4g.medium.search + # count: 1, + # }, + # }, + # ], # }, # ebs_options: { # ebs_enabled: false, @@ -1047,6 
+1057,11 @@ def cancel_service_software_update(params = {}, options = {}) # resp.domain_status.cluster_config.warm_count #=> Integer # resp.domain_status.cluster_config.cold_storage_options.enabled #=> Boolean # resp.domain_status.cluster_config.multi_az_with_standby_enabled #=> Boolean + # resp.domain_status.cluster_config.node_options #=> Array + # resp.domain_status.cluster_config.node_options[0].node_type #=> String, one of "coordinator" + # resp.domain_status.cluster_config.node_options[0].node_config.enabled #=> Boolean + # resp.domain_status.cluster_config.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_status.cluster_config.node_options[0].node_config.count #=> Integer # resp.domain_status.ebs_options.ebs_enabled #=> Boolean # resp.domain_status.ebs_options.volume_type #=> String, one of "standard", "gp2", "io1", "gp3" # resp.domain_status.ebs_options.volume_size #=> Integer @@ -1414,6 +1429,11 @@ def delete_data_source(params = {}, options = {}) # resp.domain_status.cluster_config.warm_count #=> Integer # resp.domain_status.cluster_config.cold_storage_options.enabled #=> Boolean # resp.domain_status.cluster_config.multi_az_with_standby_enabled #=> Boolean + # resp.domain_status.cluster_config.node_options #=> Array + # resp.domain_status.cluster_config.node_options[0].node_type #=> String, one of "coordinator" + # 
resp.domain_status.cluster_config.node_options[0].node_config.enabled #=> Boolean + # resp.domain_status.cluster_config.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_status.cluster_config.node_options[0].node_config.count #=> Integer # resp.domain_status.ebs_options.ebs_enabled #=> Boolean # resp.domain_status.ebs_options.volume_type #=> String, one of "standard", "gp2", "io1", "gp3" # resp.domain_status.ebs_options.volume_size #=> Integer @@ -1714,6 +1734,11 @@ def delete_vpc_endpoint(params = {}, options = {}) # resp.domain_status.cluster_config.warm_count #=> Integer # resp.domain_status.cluster_config.cold_storage_options.enabled #=> Boolean # resp.domain_status.cluster_config.multi_az_with_standby_enabled #=> Boolean + # resp.domain_status.cluster_config.node_options #=> Array + # resp.domain_status.cluster_config.node_options[0].node_type #=> String, one of "coordinator" + # resp.domain_status.cluster_config.node_options[0].node_config.enabled #=> Boolean + # resp.domain_status.cluster_config.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", 
"m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_status.cluster_config.node_options[0].node_config.count #=> Integer # resp.domain_status.ebs_options.ebs_enabled #=> Boolean # resp.domain_status.ebs_options.volume_type #=> String, one of "standard", "gp2", "io1", "gp3" # resp.domain_status.ebs_options.volume_size #=> Integer @@ -1945,6 +1970,11 @@ def describe_domain_change_progress(params = {}, options = {}) # resp.domain_config.cluster_config.options.warm_count #=> Integer # resp.domain_config.cluster_config.options.cold_storage_options.enabled #=> Boolean # resp.domain_config.cluster_config.options.multi_az_with_standby_enabled #=> Boolean + # resp.domain_config.cluster_config.options.node_options #=> Array + # resp.domain_config.cluster_config.options.node_options[0].node_type #=> String, one of "coordinator" + # resp.domain_config.cluster_config.options.node_options[0].node_config.enabled #=> Boolean + # resp.domain_config.cluster_config.options.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", 
"t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_config.cluster_config.options.node_options[0].node_config.count #=> Integer # resp.domain_config.cluster_config.status.creation_date #=> Time # resp.domain_config.cluster_config.status.update_date #=> Time # resp.domain_config.cluster_config.status.update_version #=> Integer @@ -2256,6 +2286,11 @@ def describe_domain_nodes(params = {}, options = {}) # resp.domain_status_list[0].cluster_config.warm_count #=> Integer # resp.domain_status_list[0].cluster_config.cold_storage_options.enabled #=> Boolean # resp.domain_status_list[0].cluster_config.multi_az_with_standby_enabled #=> Boolean + # resp.domain_status_list[0].cluster_config.node_options #=> Array + # resp.domain_status_list[0].cluster_config.node_options[0].node_type #=> String, one of "coordinator" + # resp.domain_status_list[0].cluster_config.node_options[0].node_config.enabled #=> Boolean + # resp.domain_status_list[0].cluster_config.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", 
"r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_status_list[0].cluster_config.node_options[0].node_config.count #=> Integer # resp.domain_status_list[0].ebs_options.ebs_enabled #=> Boolean # resp.domain_status_list[0].ebs_options.volume_type #=> String, one of "standard", "gp2", "io1", "gp3" # resp.domain_status_list[0].ebs_options.volume_size #=> Integer @@ -2408,6 +2443,11 @@ def describe_domains(params = {}, options = {}) # resp.dry_run_config.cluster_config.warm_count #=> Integer # resp.dry_run_config.cluster_config.cold_storage_options.enabled #=> Boolean # resp.dry_run_config.cluster_config.multi_az_with_standby_enabled #=> Boolean + # resp.dry_run_config.cluster_config.node_options #=> Array + # resp.dry_run_config.cluster_config.node_options[0].node_type #=> String, one of "coordinator" + # resp.dry_run_config.cluster_config.node_options[0].node_config.enabled #=> Boolean + # resp.dry_run_config.cluster_config.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", 
"i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.dry_run_config.cluster_config.node_options[0].node_config.count #=> Integer # resp.dry_run_config.ebs_options.ebs_enabled #=> Boolean # resp.dry_run_config.ebs_options.volume_type #=> String, one of "standard", "gp2", "io1", "gp3" # resp.dry_run_config.ebs_options.volume_size #=> Integer @@ -4256,6 +4296,16 @@ def update_data_source(params = {}, options = {}) # enabled: false, # required # }, # multi_az_with_standby_enabled: false, + # node_options: [ + # { + # node_type: "coordinator", # accepts coordinator + # node_config: { + # enabled: false, + # type: "m3.medium.search", # accepts m3.medium.search, m3.large.search, m3.xlarge.search, m3.2xlarge.search, m4.large.search, m4.xlarge.search, m4.2xlarge.search, m4.4xlarge.search, m4.10xlarge.search, m5.large.search, m5.xlarge.search, m5.2xlarge.search, m5.4xlarge.search, m5.12xlarge.search, m5.24xlarge.search, r5.large.search, r5.xlarge.search, r5.2xlarge.search, r5.4xlarge.search, r5.12xlarge.search, r5.24xlarge.search, c5.large.search, c5.xlarge.search, c5.2xlarge.search, c5.4xlarge.search, c5.9xlarge.search, c5.18xlarge.search, t3.nano.search, t3.micro.search, t3.small.search, t3.medium.search, t3.large.search, t3.xlarge.search, t3.2xlarge.search, or1.medium.search, or1.large.search, or1.xlarge.search, or1.2xlarge.search, or1.4xlarge.search, or1.8xlarge.search, or1.12xlarge.search, or1.16xlarge.search, ultrawarm1.medium.search, ultrawarm1.large.search, ultrawarm1.xlarge.search, t2.micro.search, t2.small.search, t2.medium.search, r3.large.search, r3.xlarge.search, r3.2xlarge.search, r3.4xlarge.search, r3.8xlarge.search, i2.xlarge.search, i2.2xlarge.search, d2.xlarge.search, d2.2xlarge.search, d2.4xlarge.search, d2.8xlarge.search, c4.large.search, c4.xlarge.search, c4.2xlarge.search, c4.4xlarge.search, c4.8xlarge.search, r4.large.search, r4.xlarge.search, r4.2xlarge.search, r4.4xlarge.search, r4.8xlarge.search, r4.16xlarge.search, i3.large.search, i3.xlarge.search, i3.2xlarge.search, i3.4xlarge.search, i3.8xlarge.search, i3.16xlarge.search, r6g.large.search, r6g.xlarge.search, r6g.2xlarge.search, r6g.4xlarge.search, r6g.8xlarge.search, r6g.12xlarge.search, m6g.large.search, m6g.xlarge.search, m6g.2xlarge.search, m6g.4xlarge.search, m6g.8xlarge.search, m6g.12xlarge.search, c6g.large.search, c6g.xlarge.search, c6g.2xlarge.search, c6g.4xlarge.search, c6g.8xlarge.search, c6g.12xlarge.search, r6gd.large.search, r6gd.xlarge.search, r6gd.2xlarge.search, r6gd.4xlarge.search, r6gd.8xlarge.search, r6gd.12xlarge.search, r6gd.16xlarge.search, t4g.small.search, t4g.medium.search + # count: 1, + # }, + # }, + # ], # }, # ebs_options: { # ebs_enabled: false, @@ -4386,6 +4436,11 @@ def update_data_source(params = {}, options = {}) # resp.domain_config.cluster_config.options.warm_count #=> Integer # 
resp.domain_config.cluster_config.options.cold_storage_options.enabled #=> Boolean # resp.domain_config.cluster_config.options.multi_az_with_standby_enabled #=> Boolean + # resp.domain_config.cluster_config.options.node_options #=> Array + # resp.domain_config.cluster_config.options.node_options[0].node_type #=> String, one of "coordinator" + # resp.domain_config.cluster_config.options.node_options[0].node_config.enabled #=> Boolean + # resp.domain_config.cluster_config.options.node_options[0].node_config.type #=> String, one of "m3.medium.search", "m3.large.search", "m3.xlarge.search", "m3.2xlarge.search", "m4.large.search", "m4.xlarge.search", "m4.2xlarge.search", "m4.4xlarge.search", "m4.10xlarge.search", "m5.large.search", "m5.xlarge.search", "m5.2xlarge.search", "m5.4xlarge.search", "m5.12xlarge.search", "m5.24xlarge.search", "r5.large.search", "r5.xlarge.search", "r5.2xlarge.search", "r5.4xlarge.search", "r5.12xlarge.search", "r5.24xlarge.search", "c5.large.search", "c5.xlarge.search", "c5.2xlarge.search", "c5.4xlarge.search", "c5.9xlarge.search", "c5.18xlarge.search", "t3.nano.search", "t3.micro.search", "t3.small.search", "t3.medium.search", "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", "or1.medium.search", "or1.large.search", "or1.xlarge.search", "or1.2xlarge.search", "or1.4xlarge.search", "or1.8xlarge.search", "or1.12xlarge.search", "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", "t2.micro.search", "t2.small.search", "t2.medium.search", "r3.large.search", "r3.xlarge.search", "r3.2xlarge.search", "r3.4xlarge.search", "r3.8xlarge.search", "i2.xlarge.search", "i2.2xlarge.search", "d2.xlarge.search", "d2.2xlarge.search", "d2.4xlarge.search", "d2.8xlarge.search", "c4.large.search", "c4.xlarge.search", "c4.2xlarge.search", "c4.4xlarge.search", "c4.8xlarge.search", "r4.large.search", "r4.xlarge.search", "r4.2xlarge.search", "r4.4xlarge.search", "r4.8xlarge.search", "r4.16xlarge.search", "i3.large.search", "i3.xlarge.search", "i3.2xlarge.search", "i3.4xlarge.search", "i3.8xlarge.search", "i3.16xlarge.search", "r6g.large.search", "r6g.xlarge.search", "r6g.2xlarge.search", "r6g.4xlarge.search", "r6g.8xlarge.search", "r6g.12xlarge.search", "m6g.large.search", "m6g.xlarge.search", "m6g.2xlarge.search", "m6g.4xlarge.search", "m6g.8xlarge.search", "m6g.12xlarge.search", "c6g.large.search", "c6g.xlarge.search", "c6g.2xlarge.search", "c6g.4xlarge.search", "c6g.8xlarge.search", "c6g.12xlarge.search", "r6gd.large.search", "r6gd.xlarge.search", "r6gd.2xlarge.search", "r6gd.4xlarge.search", "r6gd.8xlarge.search", "r6gd.12xlarge.search", "r6gd.16xlarge.search", "t4g.small.search", "t4g.medium.search" + # resp.domain_config.cluster_config.options.node_options[0].node_config.count #=> Integer # resp.domain_config.cluster_config.status.creation_date #=> Time # resp.domain_config.cluster_config.status.update_date #=> Time # resp.domain_config.cluster_config.status.update_version #=> Integer @@ -4841,7 +4896,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-opensearchservice' - context[:gem_version] = '1.57.0' + context[:gem_version] = '1.58.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb index 58fe5b16b56..5dae4421fa9 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb +++ 
b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb @@ -303,7 +303,11 @@ module ClientApi NaturalLanguageQueryGenerationOptionsInput = Shapes::StructureShape.new(name: 'NaturalLanguageQueryGenerationOptionsInput') NaturalLanguageQueryGenerationOptionsOutput = Shapes::StructureShape.new(name: 'NaturalLanguageQueryGenerationOptionsOutput') NextToken = Shapes::StringShape.new(name: 'NextToken') + NodeConfig = Shapes::StructureShape.new(name: 'NodeConfig') NodeId = Shapes::StringShape.new(name: 'NodeId') + NodeOption = Shapes::StructureShape.new(name: 'NodeOption') + NodeOptionsList = Shapes::ListShape.new(name: 'NodeOptionsList') + NodeOptionsNodeType = Shapes::StringShape.new(name: 'NodeOptionsNodeType') NodeStatus = Shapes::StringShape.new(name: 'NodeStatus') NodeToNodeEncryptionOptions = Shapes::StructureShape.new(name: 'NodeToNodeEncryptionOptions') NodeToNodeEncryptionOptionsStatus = Shapes::StructureShape.new(name: 'NodeToNodeEncryptionOptionsStatus') @@ -681,6 +685,7 @@ module ClientApi ClusterConfig.add_member(:warm_count, Shapes::ShapeRef.new(shape: IntegerClass, location_name: "WarmCount")) ClusterConfig.add_member(:cold_storage_options, Shapes::ShapeRef.new(shape: ColdStorageOptions, location_name: "ColdStorageOptions")) ClusterConfig.add_member(:multi_az_with_standby_enabled, Shapes::ShapeRef.new(shape: Boolean, location_name: "MultiAZWithStandbyEnabled")) + ClusterConfig.add_member(:node_options, Shapes::ShapeRef.new(shape: NodeOptionsList, location_name: "NodeOptions")) ClusterConfig.struct_class = Types::ClusterConfig ClusterConfigStatus.add_member(:options, Shapes::ShapeRef.new(shape: ClusterConfig, required: true, location_name: "Options")) @@ -1398,6 +1403,17 @@ module ClientApi NaturalLanguageQueryGenerationOptionsOutput.add_member(:current_state, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationCurrentState, location_name: "CurrentState")) NaturalLanguageQueryGenerationOptionsOutput.struct_class = Types::NaturalLanguageQueryGenerationOptionsOutput + NodeConfig.add_member(:enabled, Shapes::ShapeRef.new(shape: Boolean, location_name: "Enabled")) + NodeConfig.add_member(:type, Shapes::ShapeRef.new(shape: OpenSearchPartitionInstanceType, location_name: "Type")) + NodeConfig.add_member(:count, Shapes::ShapeRef.new(shape: IntegerClass, location_name: "Count")) + NodeConfig.struct_class = Types::NodeConfig + + NodeOption.add_member(:node_type, Shapes::ShapeRef.new(shape: NodeOptionsNodeType, location_name: "NodeType")) + NodeOption.add_member(:node_config, Shapes::ShapeRef.new(shape: NodeConfig, location_name: "NodeConfig")) + NodeOption.struct_class = Types::NodeOption + + NodeOptionsList.member = Shapes::ShapeRef.new(shape: NodeOption) + NodeToNodeEncryptionOptions.add_member(:enabled, Shapes::ShapeRef.new(shape: Boolean, location_name: "Enabled")) NodeToNodeEncryptionOptions.struct_class = Types::NodeToNodeEncryptionOptions diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb index 53bbb22998a..3b153e1db78 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb @@ -1136,6 +1136,10 @@ class ChangeProgressStatusDetails < Struct.new( # [1]: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/managedomains-multiaz.html # @return [Boolean] # + # @!attribute [rw] node_options + # List of node options for the domain. 
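For reference, a minimal sketch of how the new coordinator NodeOptions shape might be passed through Aws::OpenSearchService::Client#update_domain_config; the domain name, instance types, and counts below are illustrative placeholders, not values taken from this change:

require 'aws-sdk-opensearchservice'

client = Aws::OpenSearchService::Client.new

# Add dedicated coordinator nodes alongside the existing data nodes.
client.update_domain_config({
  domain_name: "my-domain",            # placeholder domain name
  cluster_config: {
    instance_type: "r6g.large.search",
    instance_count: 3,
    node_options: [                    # new NodeOptionsList member in this model update
      {
        node_type: "coordinator",      # the only node type enumerated in this model
        node_config: {
          enabled: true,
          type: "m5.large.search",
          count: 2,
        },
      },
    ],
  },
})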
+ # @return [Array] + # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/ClusterConfig AWS API Documentation # class ClusterConfig < Struct.new( @@ -1150,7 +1154,8 @@ class ClusterConfig < Struct.new( :warm_type, :warm_count, :cold_storage_options, - :multi_az_with_standby_enabled) + :multi_az_with_standby_enabled, + :node_options) SENSITIVE = [] include Aws::Structure end @@ -4802,6 +4807,50 @@ class NaturalLanguageQueryGenerationOptionsOutput < Struct.new( include Aws::Structure end + # Container for specifying configuration of any node type. + # + # @!attribute [rw] enabled + # A boolean that indicates whether a particular node type is enabled + # or not. + # @return [Boolean] + # + # @!attribute [rw] type + # The instance type of a particular node type in the cluster. + # @return [String] + # + # @!attribute [rw] count + # The number of nodes of a particular node type in the cluster. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/NodeConfig AWS API Documentation + # + class NodeConfig < Struct.new( + :enabled, + :type, + :count) + SENSITIVE = [] + include Aws::Structure + end + + # Container for specifying node type. + # + # @!attribute [rw] node_type + # Container for node type like coordinating. + # @return [String] + # + # @!attribute [rw] node_config + # Container for specifying configuration of any node type. + # @return [Types::NodeConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/NodeOption AWS API Documentation + # + class NodeOption < Struct.new( + :node_type, + :node_config) + SENSITIVE = [] + include Aws::Structure + end + # Enables or disables node-to-node encryption. For more information, see # [Node-to-node encryption for Amazon OpenSearch Service][1]. # diff --git a/gems/aws-sdk-opensearchservice/sig/client.rbs b/gems/aws-sdk-opensearchservice/sig/client.rbs index ad94d54a84e..9f11a5f7135 100644 --- a/gems/aws-sdk-opensearchservice/sig/client.rbs +++ b/gems/aws-sdk-opensearchservice/sig/client.rbs @@ -183,7 +183,17 @@ module Aws cold_storage_options: { enabled: bool }?, - multi_az_with_standby_enabled: bool? 
+ multi_az_with_standby_enabled: bool?, + node_options: Array[ + { + node_type: ("coordinator")?, + node_config: { + enabled: bool?, + type: ("m3.medium.search" | "m3.large.search" | "m3.xlarge.search" | "m3.2xlarge.search" | "m4.large.search" | "m4.xlarge.search" | "m4.2xlarge.search" | "m4.4xlarge.search" | "m4.10xlarge.search" | "m5.large.search" | "m5.xlarge.search" | "m5.2xlarge.search" | "m5.4xlarge.search" | "m5.12xlarge.search" | "m5.24xlarge.search" | "r5.large.search" | "r5.xlarge.search" | "r5.2xlarge.search" | "r5.4xlarge.search" | "r5.12xlarge.search" | "r5.24xlarge.search" | "c5.large.search" | "c5.xlarge.search" | "c5.2xlarge.search" | "c5.4xlarge.search" | "c5.9xlarge.search" | "c5.18xlarge.search" | "t3.nano.search" | "t3.micro.search" | "t3.small.search" | "t3.medium.search" | "t3.large.search" | "t3.xlarge.search" | "t3.2xlarge.search" | "or1.medium.search" | "or1.large.search" | "or1.xlarge.search" | "or1.2xlarge.search" | "or1.4xlarge.search" | "or1.8xlarge.search" | "or1.12xlarge.search" | "or1.16xlarge.search" | "ultrawarm1.medium.search" | "ultrawarm1.large.search" | "ultrawarm1.xlarge.search" | "t2.micro.search" | "t2.small.search" | "t2.medium.search" | "r3.large.search" | "r3.xlarge.search" | "r3.2xlarge.search" | "r3.4xlarge.search" | "r3.8xlarge.search" | "i2.xlarge.search" | "i2.2xlarge.search" | "d2.xlarge.search" | "d2.2xlarge.search" | "d2.4xlarge.search" | "d2.8xlarge.search" | "c4.large.search" | "c4.xlarge.search" | "c4.2xlarge.search" | "c4.4xlarge.search" | "c4.8xlarge.search" | "r4.large.search" | "r4.xlarge.search" | "r4.2xlarge.search" | "r4.4xlarge.search" | "r4.8xlarge.search" | "r4.16xlarge.search" | "i3.large.search" | "i3.xlarge.search" | "i3.2xlarge.search" | "i3.4xlarge.search" | "i3.8xlarge.search" | "i3.16xlarge.search" | "r6g.large.search" | "r6g.xlarge.search" | "r6g.2xlarge.search" | "r6g.4xlarge.search" | "r6g.8xlarge.search" | "r6g.12xlarge.search" | "m6g.large.search" | "m6g.xlarge.search" | "m6g.2xlarge.search" | "m6g.4xlarge.search" | "m6g.8xlarge.search" | "m6g.12xlarge.search" | "c6g.large.search" | "c6g.xlarge.search" | "c6g.2xlarge.search" | "c6g.4xlarge.search" | "c6g.8xlarge.search" | "c6g.12xlarge.search" | "r6gd.large.search" | "r6gd.xlarge.search" | "r6gd.2xlarge.search" | "r6gd.4xlarge.search" | "r6gd.8xlarge.search" | "r6gd.12xlarge.search" | "r6gd.16xlarge.search" | "t4g.small.search" | "t4g.medium.search")?, + count: ::Integer? + }? + }, + ]? }, ?ebs_options: { ebs_enabled: bool?, @@ -970,7 +980,17 @@ module Aws cold_storage_options: { enabled: bool }?, - multi_az_with_standby_enabled: bool? 
+ multi_az_with_standby_enabled: bool?, + node_options: Array[ + { + node_type: ("coordinator")?, + node_config: { + enabled: bool?, + type: ("m3.medium.search" | "m3.large.search" | "m3.xlarge.search" | "m3.2xlarge.search" | "m4.large.search" | "m4.xlarge.search" | "m4.2xlarge.search" | "m4.4xlarge.search" | "m4.10xlarge.search" | "m5.large.search" | "m5.xlarge.search" | "m5.2xlarge.search" | "m5.4xlarge.search" | "m5.12xlarge.search" | "m5.24xlarge.search" | "r5.large.search" | "r5.xlarge.search" | "r5.2xlarge.search" | "r5.4xlarge.search" | "r5.12xlarge.search" | "r5.24xlarge.search" | "c5.large.search" | "c5.xlarge.search" | "c5.2xlarge.search" | "c5.4xlarge.search" | "c5.9xlarge.search" | "c5.18xlarge.search" | "t3.nano.search" | "t3.micro.search" | "t3.small.search" | "t3.medium.search" | "t3.large.search" | "t3.xlarge.search" | "t3.2xlarge.search" | "or1.medium.search" | "or1.large.search" | "or1.xlarge.search" | "or1.2xlarge.search" | "or1.4xlarge.search" | "or1.8xlarge.search" | "or1.12xlarge.search" | "or1.16xlarge.search" | "ultrawarm1.medium.search" | "ultrawarm1.large.search" | "ultrawarm1.xlarge.search" | "t2.micro.search" | "t2.small.search" | "t2.medium.search" | "r3.large.search" | "r3.xlarge.search" | "r3.2xlarge.search" | "r3.4xlarge.search" | "r3.8xlarge.search" | "i2.xlarge.search" | "i2.2xlarge.search" | "d2.xlarge.search" | "d2.2xlarge.search" | "d2.4xlarge.search" | "d2.8xlarge.search" | "c4.large.search" | "c4.xlarge.search" | "c4.2xlarge.search" | "c4.4xlarge.search" | "c4.8xlarge.search" | "r4.large.search" | "r4.xlarge.search" | "r4.2xlarge.search" | "r4.4xlarge.search" | "r4.8xlarge.search" | "r4.16xlarge.search" | "i3.large.search" | "i3.xlarge.search" | "i3.2xlarge.search" | "i3.4xlarge.search" | "i3.8xlarge.search" | "i3.16xlarge.search" | "r6g.large.search" | "r6g.xlarge.search" | "r6g.2xlarge.search" | "r6g.4xlarge.search" | "r6g.8xlarge.search" | "r6g.12xlarge.search" | "m6g.large.search" | "m6g.xlarge.search" | "m6g.2xlarge.search" | "m6g.4xlarge.search" | "m6g.8xlarge.search" | "m6g.12xlarge.search" | "c6g.large.search" | "c6g.xlarge.search" | "c6g.2xlarge.search" | "c6g.4xlarge.search" | "c6g.8xlarge.search" | "c6g.12xlarge.search" | "r6gd.large.search" | "r6gd.xlarge.search" | "r6gd.2xlarge.search" | "r6gd.4xlarge.search" | "r6gd.8xlarge.search" | "r6gd.12xlarge.search" | "r6gd.16xlarge.search" | "t4g.small.search" | "t4g.medium.search")?, + count: ::Integer? + }? + }, + ]? 
}, ?ebs_options: { ebs_enabled: bool?, diff --git a/gems/aws-sdk-opensearchservice/sig/types.rbs b/gems/aws-sdk-opensearchservice/sig/types.rbs index 715d2d6a5c2..57af0b47589 100644 --- a/gems/aws-sdk-opensearchservice/sig/types.rbs +++ b/gems/aws-sdk-opensearchservice/sig/types.rbs @@ -281,6 +281,7 @@ module Aws::OpenSearchService attr_accessor warm_count: ::Integer attr_accessor cold_storage_options: Types::ColdStorageOptions attr_accessor multi_az_with_standby_enabled: bool + attr_accessor node_options: ::Array[Types::NodeOption] SENSITIVE: [] end @@ -1226,6 +1227,19 @@ module Aws::OpenSearchService SENSITIVE: [] end + class NodeConfig + attr_accessor enabled: bool + attr_accessor type: ("m3.medium.search" | "m3.large.search" | "m3.xlarge.search" | "m3.2xlarge.search" | "m4.large.search" | "m4.xlarge.search" | "m4.2xlarge.search" | "m4.4xlarge.search" | "m4.10xlarge.search" | "m5.large.search" | "m5.xlarge.search" | "m5.2xlarge.search" | "m5.4xlarge.search" | "m5.12xlarge.search" | "m5.24xlarge.search" | "r5.large.search" | "r5.xlarge.search" | "r5.2xlarge.search" | "r5.4xlarge.search" | "r5.12xlarge.search" | "r5.24xlarge.search" | "c5.large.search" | "c5.xlarge.search" | "c5.2xlarge.search" | "c5.4xlarge.search" | "c5.9xlarge.search" | "c5.18xlarge.search" | "t3.nano.search" | "t3.micro.search" | "t3.small.search" | "t3.medium.search" | "t3.large.search" | "t3.xlarge.search" | "t3.2xlarge.search" | "or1.medium.search" | "or1.large.search" | "or1.xlarge.search" | "or1.2xlarge.search" | "or1.4xlarge.search" | "or1.8xlarge.search" | "or1.12xlarge.search" | "or1.16xlarge.search" | "ultrawarm1.medium.search" | "ultrawarm1.large.search" | "ultrawarm1.xlarge.search" | "t2.micro.search" | "t2.small.search" | "t2.medium.search" | "r3.large.search" | "r3.xlarge.search" | "r3.2xlarge.search" | "r3.4xlarge.search" | "r3.8xlarge.search" | "i2.xlarge.search" | "i2.2xlarge.search" | "d2.xlarge.search" | "d2.2xlarge.search" | "d2.4xlarge.search" | "d2.8xlarge.search" | "c4.large.search" | "c4.xlarge.search" | "c4.2xlarge.search" | "c4.4xlarge.search" | "c4.8xlarge.search" | "r4.large.search" | "r4.xlarge.search" | "r4.2xlarge.search" | "r4.4xlarge.search" | "r4.8xlarge.search" | "r4.16xlarge.search" | "i3.large.search" | "i3.xlarge.search" | "i3.2xlarge.search" | "i3.4xlarge.search" | "i3.8xlarge.search" | "i3.16xlarge.search" | "r6g.large.search" | "r6g.xlarge.search" | "r6g.2xlarge.search" | "r6g.4xlarge.search" | "r6g.8xlarge.search" | "r6g.12xlarge.search" | "m6g.large.search" | "m6g.xlarge.search" | "m6g.2xlarge.search" | "m6g.4xlarge.search" | "m6g.8xlarge.search" | "m6g.12xlarge.search" | "c6g.large.search" | "c6g.xlarge.search" | "c6g.2xlarge.search" | "c6g.4xlarge.search" | "c6g.8xlarge.search" | "c6g.12xlarge.search" | "r6gd.large.search" | "r6gd.xlarge.search" | "r6gd.2xlarge.search" | "r6gd.4xlarge.search" | "r6gd.8xlarge.search" | "r6gd.12xlarge.search" | "r6gd.16xlarge.search" | "t4g.small.search" | "t4g.medium.search") + attr_accessor count: ::Integer + SENSITIVE: [] + end + + class NodeOption + attr_accessor node_type: ("coordinator") + attr_accessor node_config: Types::NodeConfig + SENSITIVE: [] + end + class NodeToNodeEncryptionOptions attr_accessor enabled: bool SENSITIVE: [] diff --git a/gems/aws-sdk-rds/CHANGELOG.md b/gems/aws-sdk-rds/CHANGELOG.md index 53c328a37ec..02ac1e7e528 100644 --- a/gems/aws-sdk-rds/CHANGELOG.md +++ b/gems/aws-sdk-rds/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.256.0 (2024-10-28) +------------------ + +* Feature - This release 
adds support for Enhanced Monitoring and Performance Insights when restoring Aurora Limitless Database DB clusters. It also adds support for the os-upgrade pending maintenance action. + 1.255.0 (2024-10-22) ------------------ diff --git a/gems/aws-sdk-rds/VERSION b/gems/aws-sdk-rds/VERSION index 31aba6e0ce4..1a79657bb7b 100644 --- a/gems/aws-sdk-rds/VERSION +++ b/gems/aws-sdk-rds/VERSION @@ -1 +1 @@ -1.255.0 +1.256.0 diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb index 7105cd89dde..0523a0ccd21 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb @@ -80,7 +80,7 @@ module Plugins autoload :ReservedDBInstancesOffering, 'aws-sdk-rds/reserved_db_instances_offering' autoload :ResourcePendingMaintenanceActionList, 'aws-sdk-rds/resource_pending_maintenance_action_list' - GEM_VERSION = '1.255.0' + GEM_VERSION = '1.256.0' end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb index 7773263b728..c5d02ad40dd 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb @@ -725,8 +725,25 @@ def add_tags_to_resource(params = {}, options = {}) # @option params [required, String] :apply_action # The pending maintenance action to apply to this resource. # - # Valid Values: `system-update`, `db-upgrade`, `hardware-maintenance`, - # `ca-certificate-rotation` + # Valid Values: + # + # * `ca-certificate-rotation` + # + # * `db-upgrade` + # + # * `hardware-maintenance` + # + # * `os-upgrade` + # + # * `system-update` + # + # For more information about these actions, see [Maintenance actions for + # Amazon Aurora][1] or [Maintenance actions for Amazon RDS][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-aurora + # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-rds # # @option params [required, String] :opt_in_type # A value that specifies the type of opt-in request, or undoes an opt-in @@ -3352,6 +3369,11 @@ def create_custom_db_engine_version(params = {}, options = {}) # # Valid for: Aurora DB clusters only # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting. + # + # + # # @option params [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration # Contains the scaling configuration of an Aurora Serverless v2 DB # cluster. @@ -19382,6 +19404,12 @@ def modify_custom_db_engine_version(params = {}, options = {}) # # Valid for: Aurora DB clusters only # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting when you create your Aurora Limitless + # Database DB cluster. + # + # + # # @option params [String] :ca_certificate_identifier # The CA certificate identifier to use for the DB cluster's server # certificate. @@ -25752,6 +25780,59 @@ def restore_db_cluster_from_s3(params = {}, options = {}) # @option params [Types::RdsCustomClusterConfiguration] :rds_custom_cluster_configuration # Reserved for future use. # + # @option params [Integer] :monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` to + # a value other than `0`. 
+ # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # + # @option params [String] :monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example + # is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # + # @option params [Boolean] :enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB cluster. + # + # @option params [String] :performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. + # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. + # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, then + # Amazon RDS uses your default KMS key. There is a default KMS key for + # your Amazon Web Services account. Your Amazon Web Services account has + # a different default KMS key for each Amazon Web Services Region. + # + # @option params [Integer] :performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 + # months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. + # # @option params [String] :engine_lifecycle_support # The life cycle type for this DB cluster. # @@ -25910,6 +25991,11 @@ def restore_db_cluster_from_s3(params = {}, options = {}) # transit_gateway_multicast_domain_id: "String", # replica_mode: "open-read-only", # accepts open-read-only, mounted # }, + # monitoring_interval: 1, + # monitoring_role_arn: "String", + # enable_performance_insights: false, + # performance_insights_kms_key_id: "String", + # performance_insights_retention_period: 1, # engine_lifecycle_support: "String", # }) # @@ -26493,6 +26579,59 @@ def restore_db_cluster_from_snapshot(params = {}, options = {}) # @option params [Types::RdsCustomClusterConfiguration] :rds_custom_cluster_configuration # Reserved for future use. # + # @option params [Integer] :monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` to + # a value other than `0`. + # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # + # @option params [String] :monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example + # is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # + # @option params [Boolean] :enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB cluster. + # + # @option params [String] :performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. 
+ # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. + # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, then + # Amazon RDS uses your default KMS key. There is a default KMS key for + # your Amazon Web Services account. Your Amazon Web Services account has + # a different default KMS key for each Amazon Web Services Region. + # + # @option params [Integer] :performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 + # months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. + # # @option params [String] :engine_lifecycle_support # The life cycle type for this DB cluster. # @@ -26650,6 +26789,11 @@ def restore_db_cluster_from_snapshot(params = {}, options = {}) # transit_gateway_multicast_domain_id: "String", # replica_mode: "open-read-only", # accepts open-read-only, mounted # }, + # monitoring_interval: 1, + # monitoring_role_arn: "String", + # enable_performance_insights: false, + # performance_insights_kms_key_id: "String", + # performance_insights_retention_period: 1, # engine_lifecycle_support: "String", # }) # @@ -31542,7 +31686,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-rds' - context[:gem_version] = '1.255.0' + context[:gem_version] = '1.256.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb index afcb2f7c2fb..300c884a33e 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb @@ -3812,6 +3812,11 @@ module ClientApi RestoreDBClusterFromSnapshotMessage.add_member(:serverless_v2_scaling_configuration, Shapes::ShapeRef.new(shape: ServerlessV2ScalingConfiguration, location_name: "ServerlessV2ScalingConfiguration")) RestoreDBClusterFromSnapshotMessage.add_member(:network_type, Shapes::ShapeRef.new(shape: String, location_name: "NetworkType")) RestoreDBClusterFromSnapshotMessage.add_member(:rds_custom_cluster_configuration, Shapes::ShapeRef.new(shape: RdsCustomClusterConfiguration, location_name: "RdsCustomClusterConfiguration")) + RestoreDBClusterFromSnapshotMessage.add_member(:monitoring_interval, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "MonitoringInterval")) + RestoreDBClusterFromSnapshotMessage.add_member(:monitoring_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "MonitoringRoleArn")) + RestoreDBClusterFromSnapshotMessage.add_member(:enable_performance_insights, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "EnablePerformanceInsights")) + RestoreDBClusterFromSnapshotMessage.add_member(:performance_insights_kms_key_id, Shapes::ShapeRef.new(shape: String, location_name: "PerformanceInsightsKMSKeyId")) + RestoreDBClusterFromSnapshotMessage.add_member(:performance_insights_retention_period, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "PerformanceInsightsRetentionPeriod")) RestoreDBClusterFromSnapshotMessage.add_member(:engine_lifecycle_support, Shapes::ShapeRef.new(shape: String, location_name: "EngineLifecycleSupport")) RestoreDBClusterFromSnapshotMessage.struct_class = 
Types::RestoreDBClusterFromSnapshotMessage @@ -3847,6 +3852,11 @@ module ClientApi RestoreDBClusterToPointInTimeMessage.add_member(:network_type, Shapes::ShapeRef.new(shape: String, location_name: "NetworkType")) RestoreDBClusterToPointInTimeMessage.add_member(:source_db_cluster_resource_id, Shapes::ShapeRef.new(shape: String, location_name: "SourceDbClusterResourceId")) RestoreDBClusterToPointInTimeMessage.add_member(:rds_custom_cluster_configuration, Shapes::ShapeRef.new(shape: RdsCustomClusterConfiguration, location_name: "RdsCustomClusterConfiguration")) + RestoreDBClusterToPointInTimeMessage.add_member(:monitoring_interval, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "MonitoringInterval")) + RestoreDBClusterToPointInTimeMessage.add_member(:monitoring_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "MonitoringRoleArn")) + RestoreDBClusterToPointInTimeMessage.add_member(:enable_performance_insights, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "EnablePerformanceInsights")) + RestoreDBClusterToPointInTimeMessage.add_member(:performance_insights_kms_key_id, Shapes::ShapeRef.new(shape: String, location_name: "PerformanceInsightsKMSKeyId")) + RestoreDBClusterToPointInTimeMessage.add_member(:performance_insights_retention_period, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "PerformanceInsightsRetentionPeriod")) RestoreDBClusterToPointInTimeMessage.add_member(:engine_lifecycle_support, Shapes::ShapeRef.new(shape: String, location_name: "EngineLifecycleSupport")) RestoreDBClusterToPointInTimeMessage.struct_class = Types::RestoreDBClusterToPointInTimeMessage diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb index e68144e2229..12492c00133 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb @@ -1643,6 +1643,11 @@ def wait_until(options = {}, &block) # Aurora Limitless Database to create a DB shard group. # # Valid for: Aurora DB clusters only + # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting. + # + # # @option options [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration # Contains the scaling configuration of an Aurora Serverless v2 DB # cluster. @@ -2612,6 +2617,12 @@ def failover(options = {}) # Aurora Limitless Database to create a DB shard group. # # Valid for: Aurora DB clusters only + # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting when you create your Aurora Limitless + # Database DB cluster. + # + # # @option options [String] :ca_certificate_identifier # The CA certificate identifier to use for the DB cluster's server # certificate. @@ -2687,6 +2698,11 @@ def modify(options = {}) # transit_gateway_multicast_domain_id: "String", # replica_mode: "open-read-only", # accepts open-read-only, mounted # }, + # monitoring_interval: 1, + # monitoring_role_arn: "String", + # enable_performance_insights: false, + # performance_insights_kms_key_id: "String", + # performance_insights_retention_period: 1, # engine_lifecycle_support: "String", # }) # @param [Hash] options ({}) @@ -3053,6 +3069,54 @@ def modify(options = {}) # The resource ID of the source DB cluster from which to restore. # @option options [Types::RdsCustomClusterConfiguration] :rds_custom_cluster_configuration # Reserved for future use. 
+ # @option options [Integer] :monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` to + # a value other than `0`. + # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # @option options [String] :monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example + # is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # @option options [Boolean] :enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB cluster. + # @option options [String] :performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. + # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. + # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, then + # Amazon RDS uses your default KMS key. There is a default KMS key for + # your Amazon Web Services account. Your Amazon Web Services account has + # a different default KMS key for each Amazon Web Services Region. + # @option options [Integer] :performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 + # months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. # @option options [String] :engine_lifecycle_support # The life cycle type for this DB cluster. # diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb index a794fac70e7..582cefb0c8f 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb @@ -599,6 +599,11 @@ def delete(options = {}) # transit_gateway_multicast_domain_id: "String", # replica_mode: "open-read-only", # accepts open-read-only, mounted # }, + # monitoring_interval: 1, + # monitoring_role_arn: "String", + # enable_performance_insights: false, + # performance_insights_kms_key_id: "String", + # performance_insights_retention_period: 1, # engine_lifecycle_support: "String", # }) # @param [Hash] options ({}) @@ -982,6 +987,54 @@ def delete(options = {}) # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html # @option options [Types::RdsCustomClusterConfiguration] :rds_custom_cluster_configuration # Reserved for future use. + # @option options [Integer] :monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` to + # a value other than `0`. 
+ # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # @option options [String] :monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example + # is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # @option options [Boolean] :enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB cluster. + # @option options [String] :performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. + # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. + # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, then + # Amazon RDS uses your default KMS key. There is a default KMS key for + # your Amazon Web Services account. Your Amazon Web Services account has + # a different default KMS key for each Amazon Web Services Region. + # @option options [Integer] :performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 + # months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. # @option options [String] :engine_lifecycle_support # The life cycle type for this DB cluster. # diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb index e76c564594c..8ee2a2d600e 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb @@ -823,6 +823,11 @@ def client # Aurora Limitless Database to create a DB shard group. # # Valid for: Aurora DB clusters only + # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting. + # + # # @option options [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration # Contains the scaling configuration of an Aurora Serverless v2 DB # cluster. diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb index e6a9d9479ac..ad635341426 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb @@ -285,8 +285,25 @@ class AddTagsToResourceMessage < Struct.new( # @!attribute [rw] apply_action # The pending maintenance action to apply to this resource. # - # Valid Values: `system-update`, `db-upgrade`, `hardware-maintenance`, - # `ca-certificate-rotation` + # Valid Values: + # + # * `ca-certificate-rotation` + # + # * `db-upgrade` + # + # * `hardware-maintenance` + # + # * `os-upgrade` + # + # * `system-update` + # + # For more information about these actions, see [Maintenance actions + # for Amazon Aurora][1] or [Maintenance actions for Amazon RDS][2]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-aurora + # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-rds # @return [String] # # @!attribute [rw] opt_in_type @@ -2884,6 +2901,11 @@ class CreateDBClusterEndpointMessage < Struct.new( # enable Aurora Limitless Database to create a DB shard group. # # Valid for: Aurora DB clusters only + # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting. + # + # # @return [Boolean] # # @!attribute [rw] serverless_v2_scaling_configuration @@ -17888,6 +17910,12 @@ class ModifyDBClusterEndpointMessage < Struct.new( # enable Aurora Limitless Database to create a DB shard group. # # Valid for: Aurora DB clusters only + # + # This setting is no longer used. Instead use the + # `ClusterScalabilityType` setting when you create your Aurora + # Limitless Database DB cluster. + # + # # @return [Boolean] # # @!attribute [rw] ca_certificate_identifier @@ -21009,12 +21037,26 @@ class PendingCloudwatchLogsExports < Struct.new( # For more information about maintenance actions, see [Maintaining a # DB instance][1]. # - # Valid Values:` system-update | db-upgrade | hardware-maintenance | - # ca-certificate-rotation` + # Valid Values: + # + # * `ca-certificate-rotation` + # + # * `db-upgrade` + # + # * `hardware-maintenance` + # + # * `os-upgrade` + # + # * `system-update` + # + # For more information about these actions, see [Maintenance actions + # for Amazon Aurora][2] or [Maintenance actions for Amazon RDS][3]. # # # # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html + # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-aurora + # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-rds # @return [String] # # @!attribute [rw] auto_applied_after_date @@ -23475,6 +23517,66 @@ class RestoreDBClusterFromS3Result < Struct.new( # Reserved for future use. # @return [Types::RdsCustomClusterConfiguration] # + # @!attribute [rw] monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` + # to a value other than `0`. + # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # @return [Integer] + # + # @!attribute [rw] monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An + # example is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # @return [String] + # + # @!attribute [rw] enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB + # cluster. + # @return [Boolean] + # + # @!attribute [rw] performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. + # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. 
+ # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, + # then Amazon RDS uses your default KMS key. There is a default KMS + # key for your Amazon Web Services account. Your Amazon Web Services + # account has a different default KMS key for each Amazon Web Services + # Region. + # @return [String] + # + # @!attribute [rw] performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` + # (19 months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. + # @return [Integer] + # # @!attribute [rw] engine_lifecycle_support # The life cycle type for this DB cluster. # @@ -23546,6 +23648,11 @@ class RestoreDBClusterFromSnapshotMessage < Struct.new( :serverless_v2_scaling_configuration, :network_type, :rds_custom_cluster_configuration, + :monitoring_interval, + :monitoring_role_arn, + :enable_performance_insights, + :performance_insights_kms_key_id, + :performance_insights_retention_period, :engine_lifecycle_support) SENSITIVE = [] include Aws::Structure @@ -24022,6 +24129,66 @@ class RestoreDBClusterFromSnapshotResult < Struct.new( # Reserved for future use. # @return [Types::RdsCustomClusterConfiguration] # + # @!attribute [rw] monitoring_interval + # The interval, in seconds, between points when Enhanced Monitoring + # metrics are collected for the DB cluster. To turn off collecting + # Enhanced Monitoring metrics, specify `0`. + # + # If `MonitoringRoleArn` is specified, also set `MonitoringInterval` + # to a value other than `0`. + # + # Valid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60` + # + # Default: `0` + # @return [Integer] + # + # @!attribute [rw] monitoring_role_arn + # The Amazon Resource Name (ARN) for the IAM role that permits RDS to + # send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An + # example is `arn:aws:iam:123456789012:role/emaccess`. + # + # If `MonitoringInterval` is set to a value other than `0`, supply a + # `MonitoringRoleArn` value. + # @return [String] + # + # @!attribute [rw] enable_performance_insights + # Specifies whether to turn on Performance Insights for the DB + # cluster. + # @return [Boolean] + # + # @!attribute [rw] performance_insights_kms_key_id + # The Amazon Web Services KMS key identifier for encryption of + # Performance Insights data. + # + # The Amazon Web Services KMS key identifier is the key ARN, key ID, + # alias ARN, or alias name for the KMS key. + # + # If you don't specify a value for `PerformanceInsightsKMSKeyId`, + # then Amazon RDS uses your default KMS key. There is a default KMS + # key for your Amazon Web Services account. Your Amazon Web Services + # account has a different default KMS key for each Amazon Web Services + # Region. + # @return [String] + # + # @!attribute [rw] performance_insights_retention_period + # The number of days to retain Performance Insights data. + # + # Valid Values: + # + # * `7` + # + # * *month* * 31, where *month* is a number of months from 1-23. + # Examples: `93` (3 months * 31), `341` (11 months * 31), `589` + # (19 months * 31) + # + # * `731` + # + # Default: `7` days + # + # If you specify a retention period that isn't valid, such as `94`, + # Amazon RDS issues an error. 
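Taken together, the new restore options can be exercised with a call along the lines of the sketch below; the cluster, snapshot, and IAM role identifiers are placeholders rather than values from this release:

require 'aws-sdk-rds'

rds = Aws::RDS::Client.new

# Restore a DB cluster from a snapshot with Enhanced Monitoring and
# Performance Insights turned on at restore time (new in 1.256.0).
rds.restore_db_cluster_from_snapshot({
  db_cluster_identifier: "restored-cluster",                       # placeholder
  snapshot_identifier: "my-cluster-snapshot",                      # placeholder
  engine: "aurora-postgresql",
  monitoring_interval: 30,                                         # seconds between Enhanced Monitoring samples
  monitoring_role_arn: "arn:aws:iam::123456789012:role/emaccess",  # placeholder IAM role ARN
  enable_performance_insights: true,
  performance_insights_retention_period: 7,                        # days
})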
+ # @return [Integer] + # # @!attribute [rw] engine_lifecycle_support # The life cycle type for this DB cluster. # @@ -24093,6 +24260,11 @@ class RestoreDBClusterToPointInTimeMessage < Struct.new( :network_type, :source_db_cluster_resource_id, :rds_custom_cluster_configuration, + :monitoring_interval, + :monitoring_role_arn, + :enable_performance_insights, + :performance_insights_kms_key_id, + :performance_insights_retention_period, :engine_lifecycle_support) SENSITIVE = [] include Aws::Structure diff --git a/gems/aws-sdk-rds/sig/client.rbs b/gems/aws-sdk-rds/sig/client.rbs index 802f7e1d0bd..79cbac853b4 100644 --- a/gems/aws-sdk-rds/sig/client.rbs +++ b/gems/aws-sdk-rds/sig/client.rbs @@ -3011,6 +3011,11 @@ module Aws transit_gateway_multicast_domain_id: ::String?, replica_mode: ("open-read-only" | "mounted")? }, + ?monitoring_interval: ::Integer, + ?monitoring_role_arn: ::String, + ?enable_performance_insights: bool, + ?performance_insights_kms_key_id: ::String, + ?performance_insights_retention_period: ::Integer, ?engine_lifecycle_support: ::String ) -> _RestoreDBClusterFromSnapshotResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _RestoreDBClusterFromSnapshotResponseSuccess @@ -3069,6 +3074,11 @@ module Aws transit_gateway_multicast_domain_id: ::String?, replica_mode: ("open-read-only" | "mounted")? }, + ?monitoring_interval: ::Integer, + ?monitoring_role_arn: ::String, + ?enable_performance_insights: bool, + ?performance_insights_kms_key_id: ::String, + ?performance_insights_retention_period: ::Integer, ?engine_lifecycle_support: ::String ) -> _RestoreDBClusterToPointInTimeResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _RestoreDBClusterToPointInTimeResponseSuccess diff --git a/gems/aws-sdk-rds/sig/db_cluster.rbs b/gems/aws-sdk-rds/sig/db_cluster.rbs index dcdcb37b782..1274d9fb846 100644 --- a/gems/aws-sdk-rds/sig/db_cluster.rbs +++ b/gems/aws-sdk-rds/sig/db_cluster.rbs @@ -487,6 +487,11 @@ module Aws transit_gateway_multicast_domain_id: ::String?, replica_mode: ("open-read-only" | "mounted")? }, + ?monitoring_interval: ::Integer, + ?monitoring_role_arn: ::String, + ?enable_performance_insights: bool, + ?performance_insights_kms_key_id: ::String, + ?performance_insights_retention_period: ::Integer, ?engine_lifecycle_support: ::String ) -> DBCluster | (?Hash[Symbol, untyped]) -> DBCluster diff --git a/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs b/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs index 1615f5643b2..28c1523f941 100644 --- a/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs +++ b/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs @@ -188,6 +188,11 @@ module Aws transit_gateway_multicast_domain_id: ::String?, replica_mode: ("open-read-only" | "mounted")? 
}, + ?monitoring_interval: ::Integer, + ?monitoring_role_arn: ::String, + ?enable_performance_insights: bool, + ?performance_insights_kms_key_id: ::String, + ?performance_insights_retention_period: ::Integer, ?engine_lifecycle_support: ::String ) -> DBCluster | (?Hash[Symbol, untyped]) -> DBCluster diff --git a/gems/aws-sdk-rds/sig/types.rbs b/gems/aws-sdk-rds/sig/types.rbs index 3f60fe229e0..881cb9c9956 100644 --- a/gems/aws-sdk-rds/sig/types.rbs +++ b/gems/aws-sdk-rds/sig/types.rbs @@ -3709,6 +3709,11 @@ module Aws::RDS attr_accessor serverless_v2_scaling_configuration: Types::ServerlessV2ScalingConfiguration attr_accessor network_type: ::String attr_accessor rds_custom_cluster_configuration: Types::RdsCustomClusterConfiguration + attr_accessor monitoring_interval: ::Integer + attr_accessor monitoring_role_arn: ::String + attr_accessor enable_performance_insights: bool + attr_accessor performance_insights_kms_key_id: ::String + attr_accessor performance_insights_retention_period: ::Integer attr_accessor engine_lifecycle_support: ::String SENSITIVE: [] end @@ -3748,6 +3753,11 @@ module Aws::RDS attr_accessor network_type: ::String attr_accessor source_db_cluster_resource_id: ::String attr_accessor rds_custom_cluster_configuration: Types::RdsCustomClusterConfiguration + attr_accessor monitoring_interval: ::Integer + attr_accessor monitoring_role_arn: ::String + attr_accessor enable_performance_insights: bool + attr_accessor performance_insights_kms_key_id: ::String + attr_accessor performance_insights_retention_period: ::Integer attr_accessor engine_lifecycle_support: ::String SENSITIVE: [] end diff --git a/gems/aws-sdk-storagegateway/CHANGELOG.md b/gems/aws-sdk-storagegateway/CHANGELOG.md index e468b1aad3d..740559ed83f 100644 --- a/gems/aws-sdk-storagegateway/CHANGELOG.md +++ b/gems/aws-sdk-storagegateway/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.100.0 (2024-10-28) +------------------ + +* Feature - Documentation update: Amazon FSx File Gateway will no longer be available to new customers. + 1.99.0 (2024-10-18) ------------------ diff --git a/gems/aws-sdk-storagegateway/VERSION b/gems/aws-sdk-storagegateway/VERSION index 9eb2e1ff9c2..7a9fecd3f0e 100644 --- a/gems/aws-sdk-storagegateway/VERSION +++ b/gems/aws-sdk-storagegateway/VERSION @@ -1 +1 @@ -1.99.0 +1.100.0 diff --git a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway.rb b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway.rb index cd9b029c3a1..b60b0fe9122 100644 --- a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway.rb +++ b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-storagegateway/endpoint_provider' autoload :Endpoints, 'aws-sdk-storagegateway/endpoints' - GEM_VERSION = '1.99.0' + GEM_VERSION = '1.100.0' end diff --git a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/client.rb b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/client.rb index acc0da9fb61..4d136d95f60 100644 --- a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/client.rb +++ b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/client.rb @@ -516,9 +516,18 @@ def initialize(*args) # specified is critical to all later functions of the gateway and cannot # be changed after activation. The default value is `CACHED`. # + # Amazon FSx File Gateway is no longer available to new customers. + # Existing customers of FSx File Gateway can continue to use the service + # normally. 
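For context, a minimal sketch of an ActivateGateway call; the activation key and gateway name are placeholders, and FILE_FSX_SMB, while still a valid value, is limited to existing FSx File Gateway customers per the note above:

require 'aws-sdk-storagegateway'

sgw = Aws::StorageGateway::Client.new

# Activate an S3 File Gateway; the gateway type cannot be changed after activation.
sgw.activate_gateway({
  activation_key: "ABCDE-12345-FGHIJ-67890-KLMNO",  # placeholder activation key
  gateway_name: "my-s3-file-gateway",               # placeholder
  gateway_timezone: "GMT-5:00",
  gateway_region: "us-east-1",
  gateway_type: "FILE_S3",
})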
For capabilities similar to FSx File Gateway, visit [this + # blog post][1]. + # # Valid Values: `STORED` \| `CACHED` \| `VTL` \| `FILE_S3` \| # `FILE_FSX_SMB` # + # + # + # [1]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ + # # @option params [String] :tape_drive_type # The value that indicates the type of tape drive to use for tape # gateway. This field is optional. @@ -1428,7 +1437,7 @@ def create_cached_iscsi_volume(params = {}, options = {}) # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -1521,6 +1530,9 @@ def create_cached_iscsi_volume(params = {}, options = {}) # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid NFS file share name can only contain the following characters: + # `a`-`z`, `A`-`Z`, `0`-`9`, `-`, `.`, and `_`. + # # # # @option params [Types::CacheAttributes] :cache_attributes @@ -1721,7 +1733,7 @@ def create_nfs_file_share(params = {}, options = {}) # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -1855,6 +1867,10 @@ def create_nfs_file_share(params = {}, options = {}) # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid SMB file share name cannot contain the following characters: + # `[`,`]`,`#`,`;`,`<`,`>`,`:`,`"`,``,`/`,`|`,`?`,`*`,`+`, or ASCII + # control characters `1-31`. + # # # # @option params [Types::CacheAttributes] :cache_attributes @@ -6916,6 +6932,9 @@ def update_maintenance_start_time(params = {}, options = {}) # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid NFS file share name can only contain the following characters: + # `a`-`z`, `A`-`Z`, `0`-`9`, `-`, `.`, and `_`. + # # # # @option params [Types::CacheAttributes] :cache_attributes @@ -7163,6 +7182,10 @@ def update_nfs_file_share(params = {}, options = {}) # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid SMB file share name cannot contain the following characters: + # `[`,`]`,`#`,`;`,`<`,`>`,`:`,`"`,``,`/`,`|`,`?`,`*`,`+`, or ASCII + # control characters `1-31`. + # # # # @option params [Types::CacheAttributes] :cache_attributes @@ -7557,7 +7580,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-storagegateway' - context[:gem_version] = '1.99.0' + context[:gem_version] = '1.100.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/types.rb b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/types.rb index 88731267637..8b21d7e8894 100644 --- a/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/types.rb +++ b/gems/aws-sdk-storagegateway/lib/aws-sdk-storagegateway/types.rb @@ -80,8 +80,17 @@ module Types # specified is critical to all later functions of the gateway and # cannot be changed after activation. The default value is `CACHED`. # + # Amazon FSx File Gateway is no longer available to new customers. + # Existing customers of FSx File Gateway can continue to use the + # service normally. 
For capabilities similar to FSx File Gateway, + # visit [this blog post][1]. + # # Valid Values: `STORED` \| `CACHED` \| `VTL` \| `FILE_S3` \| # `FILE_FSX_SMB` + # + # + # + # [1]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ # @return [String] # # @!attribute [rw] tape_drive_type @@ -602,6 +611,10 @@ class AutomaticTapeCreationRule < Struct.new( # one or more days of the week, during which bandwidth rate limits are # specified for uploading, downloading, or both. # + # FSx File Gateway does not support this feature. + # + # + # # @!attribute [rw] start_hour_of_day # The hour of the day to start the bandwidth rate limit interval. # @return [Integer] @@ -636,7 +649,7 @@ class AutomaticTapeCreationRule < Struct.new( # # For Tape Gateway and Volume Gateway, the minimum value is `51200`. # - # For S3 File Gateway and FSx File Gateway, the minimum value is + # This field is required for S3 File Gateway, and the minimum value is # `104857600`. # # @@ -646,6 +659,10 @@ class AutomaticTapeCreationRule < Struct.new( # The average download rate limit component of the bandwidth rate # limit interval, in bits per second. This field does not appear in # the response if the download rate limit is not set. + # + # S3 File Gateway does not support this feature. + # + # # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/BandwidthRateLimitInterval AWS API Documentation @@ -1106,7 +1123,7 @@ class CreateCachediSCSIVolumeOutput < Struct.new( # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -1212,6 +1229,9 @@ class CreateCachediSCSIVolumeOutput < Struct.new( # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid NFS file share name can only contain the following + # characters: `a`-`z`, `A`-`Z`, `0`-`9`, `-`, `.`, and `_`. + # # # @return [String] # @@ -1390,7 +1410,7 @@ class CreateNFSFileShareOutput < Struct.new( # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -1543,6 +1563,10 @@ class CreateNFSFileShareOutput < Struct.new( # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid SMB file share name cannot contain the following characters: + # `[`,`]`,`#`,`;`,`<`,`>`,`:`,`"`,``,`/`,`|`,`?`,`*`,`+`, or ASCII + # control characters `1-31`. + # # # @return [String] # @@ -2884,6 +2908,15 @@ class DescribeGatewayInformationInput < Struct.new( # # @!attribute [rw] gateway_type # The type of the gateway. + # + # Amazon FSx File Gateway is no longer available to new customers. + # Existing customers of FSx File Gateway can continue to use the + # service normally. For capabilities similar to FSx File Gateway, + # visit [this blog post][1]. + # + # + # + # [1]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ # @return [String] # # @!attribute [rw] next_update_availability_date @@ -4147,6 +4180,15 @@ class FileSystemAssociationSummary < Struct.new( # # @!attribute [rw] gateway_type # The type of the gateway. + # + # Amazon FSx File Gateway is no longer available to new customers. + # Existing customers of FSx File Gateway can continue to use the + # service normally. 
For capabilities similar to FSx File Gateway, + # visit [this blog post][1]. + # + # + # + # [1]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ # @return [String] # # @!attribute [rw] gateway_operational_state @@ -5005,7 +5047,7 @@ class NFSFileShareDefaults < Struct.new( # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -5599,7 +5641,7 @@ class RetrieveTapeRecoveryPointOutput < Struct.new( # # Bucket ARN: # - # `arn:aws:s3:::my-bucket/prefix/` + # `arn:aws:s3:::amzn-s3-demo-bucket/prefix/` # # Access point ARN: # @@ -7089,6 +7131,9 @@ class UpdateMaintenanceStartTimeOutput < Struct.new( # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid NFS file share name can only contain the following + # characters: `a`-`z`, `A`-`Z`, `0`-`9`, `-`, `.`, and `_`. + # # # @return [String] # @@ -7328,6 +7373,10 @@ class UpdateNFSFileShareOutput < Struct.new( # `FileShareName` must be set if an S3 prefix name is set in # `LocationARN`, or if an access point or access point alias is used. # + # A valid SMB file share name cannot contain the following characters: + # `[`,`]`,`#`,`;`,`<`,`>`,`:`,`"`,``,`/`,`|`,`?`,`*`,`+`, or ASCII + # control characters `1-31`. + # # # @return [String] #