From 5eeba1d82c0f9e63ed38775116848055ca85043c Mon Sep 17 00:00:00 2001
From: kc
Date: Wed, 3 Jan 2024 07:42:58 -0800
Subject: [PATCH] chore(storage): regenerate smithy sdk (#4290)

Co-authored-by: kyle
---
 .../model/abort_multipart_upload_output.dart | 7 +
 .../model/abort_multipart_upload_request.dart | 19 +-
 .../src/sdk/src/s3/model/common_prefix.dart | 3 +
 .../complete_multipart_upload_output.dart | 43 +-
 .../complete_multipart_upload_request.dart | 26 +-
 .../s3/model/completed_multipart_upload.dart | 3 +
 .../src/sdk/src/s3/model/completed_part.dart | 15 +-
 .../sdk/src/s3/model/continuation_event.dart | 3 +
 .../sdk/src/s3/model/copy_object_output.dart | 36 +-
 .../sdk/src/s3/model/copy_object_request.dart | 246 ++++++-
 .../sdk/src/s3/model/copy_object_result.dart | 11 +-
 .../sdk/src/s3/model/copy_part_result.dart | 11 +-
 .../model/create_multipart_upload_output.dart | 45 +-
 .../create_multipart_upload_request.dart | 241 +++++-
 .../lib/src/sdk/src/s3/model/csv_input.dart | 3 +
 .../lib/src/sdk/src/s3/model/csv_output.dart | 3 +
 .../lib/src/sdk/src/s3/model/delete.dart | 7 +-
 .../src/s3/model/delete_object_output.dart | 13 +-
 .../src/s3/model/delete_object_request.dart | 27 +-
 .../src/s3/model/delete_objects_output.dart | 7 +
 .../src/s3/model/delete_objects_request.dart | 43 +-
 .../src/sdk/src/s3/model/deleted_object.dart | 11 +-
 .../lib/src/sdk/src/s3/model/end_event.dart | 3 +
 .../lib/src/sdk/src/s3/model/error.dart | 5 +
 .../sdk/src/s3/model/get_object_output.dart | 80 +-
 .../sdk/src/s3/model/get_object_request.dart | 107 ++-
 .../sdk/src/s3/model/head_object_output.dart | 65 +-
 .../sdk/src/s3/model/head_object_request.dart | 77 +-
 .../lib/src/sdk/src/s3/model/initiator.dart | 7 +
 .../sdk/src/s3/model/input_serialization.dart | 3 +
 .../src/s3/model/invalid_object_state.dart | 17 +-
 .../src/s3/model/invalid_object_state.g.dart | 11 +-
 .../lib/src/sdk/src/s3/model/json_input.dart | 3 +
 .../lib/src/sdk/src/s3/model/json_output.dart | 3 +
 .../model/list_multipart_uploads_output.dart | 27 +
 .../model/list_multipart_uploads_request.dart | 37 +-
 .../src/s3/model/list_objects_v2_output.dart | 43 +-
 .../src/s3/model/list_objects_v2_request.dart | 31 +-
 .../sdk/src/s3/model/list_parts_output.dart | 23 +-
 .../sdk/src/s3/model/list_parts_request.dart | 25 +-
 .../sdk/src/s3/model/multipart_upload.dart | 7 +
 .../src/sdk/src/s3/model/no_such_bucket.dart | 11 +-
 .../sdk/src/s3/model/no_such_bucket.g.dart | 12 +-
 .../lib/src/sdk/src/s3/model/no_such_key.dart | 11 +-
 .../src/sdk/src/s3/model/no_such_key.g.dart | 12 +-
 .../src/sdk/src/s3/model/no_such_upload.dart | 11 +-
 .../sdk/src/s3/model/no_such_upload.g.dart | 12 +-
 .../lib/src/sdk/src/s3/model/not_found.dart | 7 +
 .../lib/src/sdk/src/s3/model/object.dart | 12 +
 .../sdk/src/s3/model/object_identifier.dart | 7 +-
 .../object_not_in_active_tier_error.dart | 11 +-
 .../object_not_in_active_tier_error.g.dart | 14 +-
 .../src/s3/model/object_storage_class.dart | 25 +-
 .../src/s3/model/output_serialization.dart | 3 +
 .../lib/src/sdk/src/s3/model/owner.dart | 6 +
 .../src/sdk/src/s3/model/parquet_input.dart | 3 +
 .../lib/src/sdk/src/s3/model/part.dart | 7 +-
 .../lib/src/sdk/src/s3/model/progress.dart | 3 +
 .../src/sdk/src/s3/model/progress_event.dart | 3 +
 .../sdk/src/s3/model/put_object_output.dart | 51 +-
 .../sdk/src/s3/model/put_object_request.dart | 113 ++-
 .../src/sdk/src/s3/model/records_event.dart | 3 +
 .../sdk/src/s3/model/replication_status.dart | 13 +-
 .../src/sdk/src/s3/model/request_charged.dart | 2 +
 .../src/sdk/src/s3/model/request_payer.dart | 4 +-
.../sdk/src/s3/model/request_progress.dart | 3 + .../src/sdk/src/s3/model/restore_status.dart | 9 + .../lib/src/sdk/src/s3/model/scan_range.dart | 3 + .../select_object_content_event_stream.dart | 4 + .../model/select_object_content_output.dart | 2 + .../model/select_object_content_request.dart | 7 +- .../lib/src/sdk/src/s3/model/stats.dart | 3 + .../lib/src/sdk/src/s3/model/stats_event.dart | 3 + .../src/sdk/src/s3/model/storage_class.dart | 25 +- .../src/s3/model/upload_part_copy_output.dart | 26 +- .../s3/model/upload_part_copy_request.dart | 80 +- .../sdk/src/s3/model/upload_part_output.dart | 33 +- .../sdk/src/s3/model/upload_part_request.dart | 30 +- .../abort_multipart_upload_operation.dart | 42 +- .../complete_multipart_upload_operation.dart | 89 ++- .../s3/operation/copy_object_operation.dart | 172 ++--- .../create_multipart_upload_operation.dart | 227 ++---- .../s3/operation/delete_object_operation.dart | 81 ++- .../operation/delete_objects_operation.dart | 81 ++- .../s3/operation/get_object_operation.dart | 143 ++-- .../s3/operation/head_object_operation.dart | 107 +-- .../list_multipart_uploads_operation.dart | 75 +- .../operation/list_objects_v2_operation.dart | 58 +- .../s3/operation/list_parts_operation.dart | 55 +- .../s3/operation/put_object_operation.dart | 93 ++- .../select_object_content_operation.dart | 19 +- .../operation/upload_part_copy_operation.dart | 131 ++-- .../s3/operation/upload_part_operation.dart | 117 ++- .../lib/src/sdk/src/s3/s3_client.dart | 688 ++++++++++-------- 94 files changed, 2905 insertions(+), 1254 deletions(-) diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_output.dart index 75b9efb5ee..2af69a9b59 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_output.dart @@ -44,12 +44,16 @@ abstract class AbortMultipartUploadOutput serializers = [AbortMultipartUploadOutputRestXmlSerializer()]; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? 
get requestCharged; @override AbortMultipartUploadOutputPayload getPayload() => AbortMultipartUploadOutputPayload(); + @override List get props => [requestCharged]; + @override String toString() { final helper = newBuiltValueToStringHelper('AbortMultipartUploadOutput') @@ -77,6 +81,7 @@ abstract class AbortMultipartUploadOutputPayload @override List get props => []; + @override String toString() { final helper = @@ -97,6 +102,7 @@ class AbortMultipartUploadOutputRestXmlSerializer AbortMultipartUploadOutputPayload, _$AbortMultipartUploadOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -104,6 +110,7 @@ class AbortMultipartUploadOutputRestXmlSerializer shape: 'restXml', ) ]; + @override AbortMultipartUploadOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_request.dart index 3b767da3e7..066b95ba10 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/abort_multipart_upload_request.dart @@ -72,9 +72,13 @@ abstract class AbortMultipartUploadRequest /// The bucket name to which the upload was taking place. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. 
The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Key of the object for which the multipart upload was initiated. @@ -83,10 +87,12 @@ abstract class AbortMultipartUploadRequest /// Upload ID that identifies the multipart upload. String get uploadId; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? 
get expectedBucketOwner; @override String labelFor(String key) { @@ -105,6 +111,7 @@ abstract class AbortMultipartUploadRequest @override AbortMultipartUploadRequestPayload getPayload() => AbortMultipartUploadRequestPayload(); + @override List get props => [ bucket, @@ -113,6 +120,7 @@ abstract class AbortMultipartUploadRequest requestPayer, expectedBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('AbortMultipartUploadRequest') @@ -156,6 +164,7 @@ abstract class AbortMultipartUploadRequestPayload @override List get props => []; + @override String toString() { final helper = @@ -176,6 +185,7 @@ class AbortMultipartUploadRequestRestXmlSerializer AbortMultipartUploadRequestPayload, _$AbortMultipartUploadRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -183,6 +193,7 @@ class AbortMultipartUploadRequestRestXmlSerializer shape: 'restXml', ) ]; + @override AbortMultipartUploadRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/common_prefix.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/common_prefix.dart index 6266cb4d5e..942bd197c4 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/common_prefix.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/common_prefix.dart @@ -33,6 +33,7 @@ abstract class CommonPrefix String? get prefix; @override List get props => [prefix]; + @override String toString() { final helper = newBuiltValueToStringHelper('CommonPrefix') @@ -53,6 +54,7 @@ class CommonPrefixRestXmlSerializer CommonPrefix, _$CommonPrefix, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -60,6 +62,7 @@ class CommonPrefixRestXmlSerializer shape: 'restXml', ) ]; + @override CommonPrefix deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_output.dart index a9279dc189..ee06d3fccc 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_output.dart @@ -110,45 +110,55 @@ abstract class CompleteMultipartUploadOutput /// The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. 
When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// Access points are not supported by directory buckets. String? get bucket; /// The object key of the newly created object. String? get key; /// If the object expiration is configured, this will contain the expiration date (`expiry-date`) and rule ID (`rule-id`). The value of `rule-id` is URL-encoded. + /// + /// This functionality is not supported for directory buckets. String? get expiration; /// Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get eTag; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? 
get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; /// Version ID of the newly created object, in case the bucket has versioning turned on. + /// + /// This functionality is not supported for directory buckets. String? get versionId; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? 
get requestCharged; @override CompleteMultipartUploadOutputPayload getPayload() => @@ -162,6 +172,7 @@ abstract class CompleteMultipartUploadOutput b.key = key; b.location = location; }); + @override List get props => [ location, @@ -179,6 +190,7 @@ abstract class CompleteMultipartUploadOutput bucketKeyEnabled, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CompleteMultipartUploadOutput') @@ -257,21 +269,19 @@ abstract class CompleteMultipartUploadOutputPayload /// The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// Access points are not supported by directory buckets. String? get bucket; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. 
When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; /// Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. 
@@ -293,6 +303,7 @@ abstract class CompleteMultipartUploadOutputPayload key, location, ]; + @override String toString() { final helper = @@ -345,6 +356,7 @@ class CompleteMultipartUploadOutputRestXmlSerializer extends _i2 CompleteMultipartUploadOutputPayload, _$CompleteMultipartUploadOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -352,6 +364,7 @@ class CompleteMultipartUploadOutputRestXmlSerializer extends _i2 shape: 'restXml', ) ]; + @override CompleteMultipartUploadOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_request.dart index 38256dcf51..73ca86cebc 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/complete_multipart_upload_request.dart @@ -121,9 +121,13 @@ abstract class CompleteMultipartUploadRequest /// Name of the bucket to which the multipart upload was initiated. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Object key for which the multipart upload was initiated. @@ -147,19 +151,27 @@ abstract class CompleteMultipartUploadRequest /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumSha256; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is required only when the object was created using a checksum algorithm or if your bucket policy requires the use of SSE-C. 
For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; /// The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; @override String labelFor(String key) { @@ -178,6 +190,7 @@ abstract class CompleteMultipartUploadRequest @override CompletedMultipartUpload? getPayload() => multipartUpload ?? CompletedMultipartUpload(); + @override List get props => [ bucket, @@ -194,6 +207,7 @@ abstract class CompleteMultipartUploadRequest sseCustomerKey, sseCustomerKeyMd5, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CompleteMultipartUploadRequest') @@ -263,6 +277,7 @@ class CompleteMultipartUploadRequestRestXmlSerializer CompleteMultipartUploadRequest, _$CompleteMultipartUploadRequest, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -270,6 +285,7 @@ class CompleteMultipartUploadRequestRestXmlSerializer shape: 'restXml', ) ]; + @override CompletedMultipartUpload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_multipart_upload.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_multipart_upload.dart index b3bcff3732..1cdff2f325 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_multipart_upload.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_multipart_upload.dart @@ -39,6 +39,7 @@ abstract class CompletedMultipartUpload _i2.BuiltList? 
get parts; @override List get props => [parts]; + @override String toString() { final helper = newBuiltValueToStringHelper('CompletedMultipartUpload') @@ -60,6 +61,7 @@ class CompletedMultipartUploadRestXmlSerializer CompletedMultipartUpload, _$CompletedMultipartUpload, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -67,6 +69,7 @@ class CompletedMultipartUploadRestXmlSerializer shape: 'restXml', ) ]; + @override CompletedMultipartUpload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_part.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_part.dart index ed338eb961..184b455fa5 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_part.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/completed_part.dart @@ -46,19 +46,23 @@ abstract class CompletedPart /// Entity tag returned when the part was uploaded. String? get eTag; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. 
For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; /// Part number that identifies the part. This is a positive integer between 1 and 10,000. + /// + /// * **General purpose buckets** \- In `CompleteMultipartUpload`, when an additional checksum (including `x-amz-checksum-crc32`, `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or `x-amz-checksum-sha256`) is applied to each part, the `PartNumber` must start at 1 and the part numbers must be consecutive. Otherwise, Amazon S3 generates an HTTP `400 Bad Request` status code and an `InvalidPartOrder` error code. + /// + /// * **Directory buckets** \- In `CompleteMultipartUpload`, the `PartNumber` must start at 1 and the part numbers must be consecutive. int?
get partNumber; @override List get props => [ @@ -69,6 +73,7 @@ abstract class CompletedPart checksumSha256, partNumber, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CompletedPart') @@ -109,6 +114,7 @@ class CompletedPartRestXmlSerializer CompletedPart, _$CompletedPart, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -116,6 +122,7 @@ class CompletedPartRestXmlSerializer shape: 'restXml', ) ]; + @override CompletedPart deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/continuation_event.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/continuation_event.dart index 12c2349058..0ad239ab7d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/continuation_event.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/continuation_event.dart @@ -30,6 +30,7 @@ abstract class ContinuationEvent @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('ContinuationEvent'); @@ -46,6 +47,7 @@ class ContinuationEventRestXmlSerializer ContinuationEvent, _$ContinuationEvent, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -53,6 +55,7 @@ class ContinuationEventRestXmlSerializer shape: 'restXml', ) ]; + @override ContinuationEvent deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_output.dart index 548a1a31ad..57fa0c2c86 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_output.dart @@ -115,36 +115,57 @@ abstract class CopyObjectOutput CopyObjectResult? get copyObjectResult; /// If the object expiration is configured, the response includes this header. + /// + /// This functionality is not supported for directory buckets. String? get expiration; - /// Version of the copied object in the destination bucket. + /// Version ID of the source object that was copied. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceVersionId; /// Version ID of the newly created copy. + /// + /// This functionality is not supported for directory buckets. String? get versionId; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? 
get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; - /// If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsEncryptionContext; /// Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override CopyObjectResult? getPayload() => copyObjectResult ?? CopyObjectResult(); + @override List get props => [ copyObjectResult, @@ -159,6 +180,7 @@ abstract class CopyObjectOutput bucketKeyEnabled, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CopyObjectOutput') @@ -219,6 +241,7 @@ class CopyObjectOutputRestXmlSerializer CopyObjectOutput, _$CopyObjectOutput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -226,6 +249,7 @@ class CopyObjectOutputRestXmlSerializer shape: 'restXml', ) ]; + @override CopyObjectResult deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_request.dart index 7cdbb23b3f..75a3c105c7 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_request.dart @@ -309,60 +309,113 @@ abstract class CopyObjectRequest static const List<_i1.SmithySerializer> serializers = [CopyObjectRequestRestXmlSerializer()]; - /// The canned ACL to apply to the object. + /// The canned access control list (ACL) to apply to the object. /// - /// This action is not supported by Amazon S3 on Outposts. + /// When you copy an object, the ACL metadata is not preserved and is set to `private` by default. Only the owner has full access control. 
To override the default ACL setting, specify a new ACL when you generate a copy request. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + /// + /// If the destination bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept `PUT` requests that don't specify an ACL or `PUT` requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. + /// + /// * If your destination bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. ObjectCannedAcl? get acl; /// The name of the destination bucket. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. + /// + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. 
When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; - /// Specifies caching behavior along the request/reply chain. + /// Specifies the caching behavior along the request/reply chain. String? get cacheControl; - /// Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// + /// When you copy an object, if the source object has a checksum, that checksum value will be copied to the new object by default. If the `CopyObject` request does not include this `x-amz-checksum-algorithm` header, the checksum algorithm will be copied from the source object to the destination object (if it's present on the source object). You can optionally specify a different checksum algorithm to use with the `x-amz-checksum-algorithm` header. Unrecognized or unsupported values will respond with the HTTP status code `400 Bad Request`. + /// + /// For directory buckets, when you use Amazon Web Services SDKs, `CRC32` is the default checksum algorithm that's used for performance. ChecksumAlgorithm? get checksumAlgorithm; - /// Specifies presentational information for the object. + /// Specifies presentational information for the object. Indicates whether an object should be displayed in a web browser or downloaded as a file. It allows specifying the desired filename for the downloaded file. String? get contentDisposition; /// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. + /// + /// For directory buckets, only the `aws-chunked` value is supported in this header field. String? get contentEncoding; /// The language the content is in. String? get contentLanguage; - /// A standard MIME type describing the format of the object data. + /// A standard MIME type that describes the format of the object data. String? get contentType; - /// Specifies the source object for the copy operation. 
You specify the value in one of two formats, depending on whether you want to access the source object through an [access point](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + /// Specifies the source object for the copy operation. The source object can be up to 5 GB. If the source object is an object that was uploaded by using a multipart upload, the object copy will be a single part object after the source object is copied to the destination bucket. /// - /// * For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object `reports/january.pdf` from the bucket `awsexamplebucket`, use `awsexamplebucket/reports/january.pdf`. The value must be URL-encoded. + /// You specify the value of the copy source in one of two formats, depending on whether you want to access the source object through an [access point](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + /// + /// * For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object `reports/january.pdf` from the general purpose bucket `awsexamplebucket`, use `awsexamplebucket/reports/january.pdf`. The value must be URL-encoded. To copy the object `reports/january.pdf` from the directory bucket `awsexamplebucket--use1-az5--x-s3`, use `awsexamplebucket--use1-az5--x-s3/reports/january.pdf`. The value must be URL-encoded. /// /// * For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format `arn:aws:s3:::accesspoint//object/`. For example, to copy the object `reports/january.pdf` through access point `my-access-point` owned by account `123456789012` in Region `us-west-2`, use the URL encoding of `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. The value must be URL encoded. /// - /// Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region. + /// * Amazon S3 supports copy operations using Access points only when the source and destination buckets are in the same Amazon Web Services Region. + /// + /// * Access points are not supported by directory buckets. + /// /// /// Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format `arn:aws:s3-outposts:::outpost//object/`. For example, to copy the object `reports/january.pdf` through outpost `my-outpost` owned by account `123456789012` in Region `us-west-2`, use the URL encoding of `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. The value must be URL-encoded. /// /// - /// To copy a specific version of an object, append `?versionId=` to the value (for example, `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). If you don't specify a version ID, Amazon S3 copies the latest version of the source object. + /// If your source bucket versioning is enabled, the `x-amz-copy-source` header by default identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the `versionId` query parameter. 
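A minimal Dart sketch of the copy-source formats just described, using only the placeholder bucket, key, access point ARN, and version ID from the examples in this documentation; these are illustrative string values, not part of the generated SDK:

```dart
// Illustrative x-amz-copy-source values only; all names are placeholders.
// General purpose bucket + key (the value must be URL-encoded when sent):
const copySourceByBucket = 'awsexamplebucket/reports/january.pdf';

// Access point ARN form:
const copySourceByAccessPoint =
    'arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point'
    '/object/reports/january.pdf';

// Pinning a specific source version via the versionId query parameter:
const copySourceWithVersion =
    'awsexamplebucket/reports/january.pdf'
    '?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893';
```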
Specifically, append `?versionId=` to the value (for example, `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). If you don't specify a version ID, Amazon S3 copies the latest version of the source object. + /// + /// If you enable versioning on the destination bucket, Amazon S3 generates a unique version ID for the copied object. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the `x-amz-version-id` response header in the response. + /// + /// If you do not enable versioning or suspend it on the destination bucket, the version ID that Amazon S3 generates in the `x-amz-version-id` response header is always null. + /// + /// **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. String get copySource; /// Copies the object if its entity tag (ETag) matches the specified tag. + /// + /// If both the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request and evaluate as follows, Amazon S3 returns `200 OK` and copies the data: + /// + /// * `x-amz-copy-source-if-match` condition evaluates to true + /// + /// * `x-amz-copy-source-if-unmodified-since` condition evaluates to false String? get copySourceIfMatch; /// Copies the object if it has been modified since the specified time. + /// + /// If both the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request and evaluate as follows, Amazon S3 returns the `412 Precondition Failed` response code: + /// + /// * `x-amz-copy-source-if-none-match` condition evaluates to false + /// + /// * `x-amz-copy-source-if-modified-since` condition evaluates to true DateTime? get copySourceIfModifiedSince; /// Copies the object if its entity tag (ETag) is different than the specified ETag. + /// + /// If both the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request and evaluate as follows, Amazon S3 returns the `412 Precondition Failed` response code: + /// + /// * `x-amz-copy-source-if-none-match` condition evaluates to false + /// + /// * `x-amz-copy-source-if-modified-since` condition evaluates to true String? get copySourceIfNoneMatch; /// Copies the object if it hasn't been modified since the specified time. + /// + /// If both the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request and evaluate as follows, Amazon S3 returns `200 OK` and copies the data: + /// + /// * `x-amz-copy-source-if-match` condition evaluates to true + /// + /// * `x-amz-copy-source-if-unmodified-since` condition evaluates to false DateTime? get copySourceIfUnmodifiedSince; /// The date and time at which the object is no longer cacheable. @@ -370,22 +423,30 @@ abstract class CopyObjectRequest /// Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the object. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantFullControl; /// Allows grantee to read the object data and its metadata. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? 
get grantRead; /// Allows grantee to read the object ACL. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantReadAcp; /// Allows grantee to write the ACL for the applicable object. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantWriteAcp; /// The key of the destination object. @@ -394,69 +455,177 @@ abstract class CopyObjectRequest /// A map of metadata to store with the object in S3. _i3.BuiltMap? get metadata; - /// Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. + /// Specifies whether the metadata is copied from the source object or replaced with metadata that's provided in the request. When copying an object, you can preserve all metadata (the default) or specify new metadata. If this header isn’t specified, `COPY` is the default behavior. + /// + /// **General purpose bucket** \- For general purpose buckets, when you grant permissions, you can use the `s3:x-amz-metadata-directive` condition key to enforce certain metadata behavior when objects are uploaded. For more information, see [Amazon S3 condition key examples](https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in the _Amazon S3 User Guide_. + /// + /// `x-amz-website-redirect-location` is unique to each object and is not copied when using the `x-amz-metadata-directive` header. To copy the value, you must specify `x-amz-website-redirect-location` in the request header. MetadataDirective? get metadataDirective; - /// Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request. + /// Specifies whether the object tag-set is copied from the source object or replaced with the tag-set that's provided in the request. + /// + /// The default value is `COPY`. + /// + /// **Directory buckets** \- For directory buckets in a `CopyObject` operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a `501 Not Implemented` status code. When the destination bucket is a directory bucket, you will receive a `501 Not Implemented` response in any of the following situations: + /// + /// * When you attempt to `COPY` the tag-set from an S3 source object that has non-empty tags. + /// + /// * When you attempt to `REPLACE` the tag-set of a source object and set a non-empty value to `x-amz-tagging`. + /// + /// * When you don't set the `x-amz-tagging-directive` header and the source object has non-empty tags. This is because the default value of `x-amz-tagging-directive` is `COPY`. + /// + /// + /// Because only the empty tag-set is supported for directory buckets in a `CopyObject` operation, the following situations are allowed: + /// + /// * When you attempt to `COPY` the tag-set from a directory bucket source object that has no tags to a general purpose bucket. It copies an empty tag-set to the destination object. + /// + /// * When you attempt to `REPLACE` the tag-set of a directory bucket source object and set the `x-amz-tagging` value of the directory bucket destination object to empty. 
+ /// + /// * When you attempt to `REPLACE` the tag-set of a general purpose bucket source object that has non-empty tags and set the `x-amz-tagging` value of the directory bucket destination object to empty. + /// + /// * When you attempt to `REPLACE` the tag-set of a directory bucket source object and don't set the `x-amz-tagging` value of the directory bucket destination object. This is because the default value of `x-amz-tagging` is the empty value. TaggingDirective? get taggingDirective; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). Unrecognized or unsupported values won’t write a destination object and will receive a `400 Bad Request` response. + /// + /// Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. + /// + /// When you perform a `CopyObject` operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. + /// + /// With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in the _Amazon S3 User Guide_. + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// If the `x-amz-storage-class` header is not used, the copied object will be stored in the `STANDARD` Storage Class by default. The `STANDARD` storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. 
+ /// + /// * **Directory buckets** \- For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code `400 Bad Request`. + /// + /// * **Amazon S3 on Outposts** \- S3 on Outposts only uses the `OUTPOSTS` Storage Class. + /// + /// + /// You can use the `CopyObject` action to change the storage class of an object that is already stored in Amazon S3 by using the `x-amz-storage-class` header. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// + /// Before using an object as a source object for the copy operation, you must restore a copy of it if it meets any of the following conditions: + /// + /// * The storage class of the source object is `GLACIER` or `DEEP_ARCHIVE`. + /// + /// * The storage class of the source object is `INTELLIGENT_TIERING` and it's [S3 Intelligent-Tiering access tier](https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) is `Archive Access` or `Deep Archive Access`. + /// + /// + /// For more information, see [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) and [Copying Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) in the _Amazon S3 User Guide_. StorageClass? get storageClass; - /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This value is unique to each object and is not copied when using the `x-amz-metadata-directive` header. Instead, you may opt to provide this header in combination with the directive. + /// If the destination bucket is configured as a website, redirects requests for this object copy to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This value is unique to each object and is not copied when using the `x-amz-metadata-directive` header. Instead, you may opt to provide this header in combination with the `x-amz-metadata-directive` header. + /// + /// This functionality is not supported for directory buckets. String? get websiteRedirectLocation; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, `AES256`). + /// + /// When you perform a `CopyObject` operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerAlgorithm; - /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. 
The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded. Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerKeyMd5; - /// Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. + /// Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get ssekmsKeyId; - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value must be explicitly added to specify encryption context for `CopyObject` requests. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get ssekmsEncryptionContext; - /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. /// - /// Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. + /// Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. 
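The `x-amz-server-side-encryption-context` value described a few lines above is a base64-encoded UTF-8 string holding JSON. A minimal sketch of producing such a value with `dart:convert`; the helper name and the key-value pairs are placeholders, not part of the generated SDK:

```dart
import 'dart:convert';

/// Encodes an SSE-KMS encryption context as the base64-encoded UTF-8 JSON
/// string expected by the x-amz-server-side-encryption-context header.
/// Illustrative only; the key-value pairs below are placeholders.
String encodeEncryptionContext(Map<String, String> context) =>
    base64Encode(utf8.encode(json.encode(context)));

void main() {
  print(encodeEncryptionContext({'Department': 'Finance'}));
}
```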
+ /// + /// For more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. bool? get bucketKeyEnabled; - /// Specifies the algorithm to use when decrypting the source object (for example, AES256). + /// Specifies the algorithm to use when decrypting the source object (for example, `AES256`). + /// + /// If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerAlgorithm; - /// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created. + /// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be the same one that was used when the source object was created. + /// + /// If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerKeyMd5; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The tag-set for the object destination object this value must be used in conjunction with the `TaggingDirective`. The tag-set must be encoded as URL Query parameters. + /// The tag-set for the object copy in the destination bucket. This value must be used in conjunction with the `x-amz-tagging-directive` if you choose `REPLACE` for the `x-amz-tagging-directive`. 
If you choose `COPY` for the `x-amz-tagging-directive`, you don't need to set the `x-amz-tagging` header, because the tag-set will be copied from the source object directly. The tag-set must be encoded as URL Query parameters. + /// + /// The default value is the empty value. + /// + /// **Directory buckets** \- For directory buckets in a `CopyObject` operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a `501 Not Implemented` status code. When the destination bucket is a directory bucket, you will receive a `501 Not Implemented` response in any of the following situations: + /// + /// * When you attempt to `COPY` the tag-set from an S3 source object that has non-empty tags. + /// + /// * When you attempt to `REPLACE` the tag-set of a source object and set a non-empty value to `x-amz-tagging`. + /// + /// * When you don't set the `x-amz-tagging-directive` header and the source object has non-empty tags. This is because the default value of `x-amz-tagging-directive` is `COPY`. + /// + /// + /// Because only the empty tag-set is supported for directory buckets in a `CopyObject` operation, the following situations are allowed: + /// + /// * When you attempt to `COPY` the tag-set from a directory bucket source object that has no tags to a general purpose bucket. It copies an empty tag-set to the destination object. + /// + /// * When you attempt to `REPLACE` the tag-set of a directory bucket source object and set the `x-amz-tagging` value of the directory bucket destination object to empty. + /// + /// * When you attempt to `REPLACE` the tag-set of a general purpose bucket source object that has non-empty tags and set the `x-amz-tagging` value of the directory bucket destination object to empty. + /// + /// * When you attempt to `REPLACE` the tag-set of a directory bucket source object and don't set the `x-amz-tagging` value of the directory bucket destination object. This is because the default value of `x-amz-tagging` is the empty value. String? get tagging; - /// The Object Lock mode that you want to apply to the copied object. + /// The Object Lock mode that you want to apply to the object copy. + /// + /// This functionality is not supported for directory buckets. ObjectLockMode? get objectLockMode; - /// The date and time when you want the copied object's Object Lock to expire. + /// The date and time when you want the Object Lock of the object copy to expire. + /// + /// This functionality is not supported for directory buckets. DateTime? get objectLockRetainUntilDate; - /// Specifies whether you want to apply a legal hold to the copied object. + /// Specifies whether you want to apply a legal hold to the object copy. + /// + /// This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus? get objectLockLegalHoldStatus; - /// The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected destination bucket owner. If the account ID that you provide does not match the actual owner of the destination bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). 
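The `x-amz-tagging` value described above must be encoded as URL query parameters. A minimal sketch using `Uri` from the Dart core library; the tag keys and values are placeholders:

```dart
// Illustrative only: encode a tag-set as URL query parameters for the
// x-amz-tagging header. Tag keys and values are placeholders.
final tagging = Uri(queryParameters: {
  'project': 'reports',
  'team': 'analytics',
}).query; // project=reports&team=analytics
```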
+ /// The account ID of the expected source bucket owner. If the account ID that you provide does not match the actual owner of the source bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedSourceBucketOwner; @override String labelFor(String key) { @@ -474,6 +643,7 @@ abstract class CopyObjectRequest @override CopyObjectRequestPayload getPayload() => CopyObjectRequestPayload(); + @override List get props => [ acl, @@ -518,6 +688,7 @@ abstract class CopyObjectRequest expectedBucketOwner, expectedSourceBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CopyObjectRequest') @@ -703,6 +874,7 @@ abstract class CopyObjectRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('CopyObjectRequestPayload'); @@ -721,6 +893,7 @@ class CopyObjectRequestRestXmlSerializer CopyObjectRequestPayload, _$CopyObjectRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -728,6 +901,7 @@ class CopyObjectRequestRestXmlSerializer shape: 'restXml', ) ]; + @override CopyObjectRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_result.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_result.dart index aee988dac1..3dd80ecc57 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_result.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_object_result.dart @@ -49,16 +49,16 @@ abstract class CopyObjectResult /// Creation date of the object. DateTime? get lastModified; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. 
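The checksum fields on this result are base64 encodings of the raw digest bytes. A minimal sketch of computing a comparable SHA-256 value locally, assuming the `crypto` package from pub.dev is available; the helper name is illustrative:

```dart
import 'dart:convert';

import 'package:crypto/crypto.dart'; // assumed pub.dev dependency

/// Illustrative only: base64 of the raw SHA-256 digest, the same encoding
/// used by the ChecksumSHA256 value described above.
String checksumSha256Of(List<int> objectBytes) =>
    base64Encode(sha256.convert(objectBytes).bytes);
```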
For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumSha256; @override List get props => [ @@ -69,6 +69,7 @@ abstract class CopyObjectResult checksumSha1, checksumSha256, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CopyObjectResult') @@ -109,6 +110,7 @@ class CopyObjectResultRestXmlSerializer CopyObjectResult, _$CopyObjectResult, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -116,6 +118,7 @@ class CopyObjectResultRestXmlSerializer shape: 'restXml', ) ]; + @override CopyObjectResult deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_part_result.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_part_result.dart index c8c2ddceef..5bdfa31110 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_part_result.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/copy_part_result.dart @@ -49,16 +49,16 @@ abstract class CopyPartResult /// Date and time at which the object was uploaded. DateTime? get lastModified; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? 
get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? 
get checksumSha256; @override List get props => [ @@ -69,6 +69,7 @@ abstract class CopyPartResult checksumSha1, checksumSha256, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CopyPartResult') @@ -109,6 +110,7 @@ class CopyPartResultRestXmlSerializer CopyPartResult, _$CopyPartResult, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -116,6 +118,7 @@ class CopyPartResultRestXmlSerializer shape: 'restXml', ) ]; + @override CopyPartResult deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_output.dart index de73f65ad0..fe827147df 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_output.dart @@ -119,19 +119,21 @@ abstract class CreateMultipartUploadOutput static const List<_i2.SmithySerializer> serializers = [CreateMultipartUploadOutputRestXmlSerializer()]; - /// If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + /// If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) in the _Amazon S3 User Guide_. /// - /// The response also includes the `x-amz-abort-rule-id` header that provides the ID of the lifecycle configuration rule that defines this action. + /// The response also includes the `x-amz-abort-rule-id` header that provides the ID of the lifecycle configuration rule that defines the abort action. + /// + /// This functionality is not supported for directory buckets. DateTime? get abortDate; /// This header is returned along with the `x-amz-abort-date` header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads. + /// + /// This functionality is not supported for directory buckets. String? get abortRuleId; /// The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// Access points are not supported by directory buckets. String? get bucket; /// Object key for which the multipart upload was initiated. @@ -140,25 +142,39 @@ abstract class CreateMultipartUploadOutput /// ID for the initiated multipart upload. String? get uploadId; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; - /// If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsEncryptionContext; /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). 
+ /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; /// The algorithm that was used to create a checksum of the object. @@ -170,6 +186,7 @@ abstract class CreateMultipartUploadOutput b.key = key; b.uploadId = uploadId; }); + @override List get props => [ abortDate, @@ -186,6 +203,7 @@ abstract class CreateMultipartUploadOutput requestCharged, checksumAlgorithm, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CreateMultipartUploadOutput') @@ -260,9 +278,7 @@ abstract class CreateMultipartUploadOutputPayload /// The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// Access points are not supported by directory buckets. String? get bucket; /// Object key for which the multipart upload was initiated. @@ -276,6 +292,7 @@ abstract class CreateMultipartUploadOutputPayload key, uploadId, ]; + @override String toString() { final helper = @@ -308,6 +325,7 @@ class CreateMultipartUploadOutputRestXmlSerializer CreateMultipartUploadOutputPayload, _$CreateMultipartUploadOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -315,6 +333,7 @@ class CreateMultipartUploadOutputRestXmlSerializer shape: 'restXml', ) ]; + @override CreateMultipartUploadOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_request.dart index 4a4e59dbd0..b0bb0c8600 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/create_multipart_upload_request.dart @@ -235,16 +235,24 @@ abstract class CreateMultipartUploadRequest static const List<_i1.SmithySerializer> serializers = [CreateMultipartUploadRequestRestXmlSerializer()]; - /// The canned ACL to apply to the object. + /// The canned ACL to apply to the object. Amazon S3 supports a set of predefined ACLs, known as _canned ACLs_. 
Each canned ACL has a predefined set of grantees and permissions. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) in the _Amazon S3 User Guide_. /// - /// This action is not supported by Amazon S3 on Outposts. + /// By default, all objects are private. Only the owner has full access control. When uploading an object, you can grant access permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the new object. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). One way to grant the permissions using the request headers is to specify a canned ACL with the `x-amz-acl` request header. + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. ObjectCannedAcl? get acl; - /// The name of the bucket to which to initiate the upload + /// The name of the bucket where the multipart upload is initiated and where the object is uploaded. + /// + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. + /// + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// Access points and Object Lambda access points are not supported by directory buckets. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. 
When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Specifies caching behavior along the request/reply chain. @@ -254,9 +262,11 @@ abstract class CreateMultipartUploadRequest String? get contentDisposition; /// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. + /// + /// For directory buckets, only the `aws-chunked` value is supported in this header field. String? get contentEncoding; - /// The language the content is in. + /// The language that the content is in. String? get contentLanguage; /// A standard MIME type describing the format of the object data. @@ -265,24 +275,176 @@ abstract class CreateMultipartUploadRequest /// The date and time at which the object is no longer cacheable. DateTime? get expires; - /// Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the object. + /// Specify access permissions explicitly to give the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the object. + /// + /// By default, all objects are private. Only the owner has full access control. When uploading an object, you can use this header to explicitly grant access permissions to specific Amazon Web Services accounts or groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the _Amazon S3 User Guide_. + /// + /// You specify each grantee as a type=value pair, where the type is one of the following: + /// + /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account + /// + /// * `uri` – if you are granting permissions to a predefined group + /// + /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account + /// + /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: + /// + /// * US East (N. Virginia) + /// + /// * US West (N. California) + /// + /// * US West (Oregon) /// - /// This action is not supported by Amazon S3 on Outposts. + /// * Asia Pacific (Singapore) + /// + /// * Asia Pacific (Sydney) + /// + /// * Asia Pacific (Tokyo) + /// + /// * Europe (Ireland) + /// + /// * South America (São Paulo) + /// + /// + /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. 
+ /// + /// + /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: + /// + /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantFullControl; - /// Allows grantee to read the object data and its metadata. + /// Specify access permissions explicitly to allow grantee to read the object data and its metadata. + /// + /// By default, all objects are private. Only the owner has full access control. When uploading an object, you can use this header to explicitly grant access permissions to specific Amazon Web Services accounts or groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the _Amazon S3 User Guide_. + /// + /// You specify each grantee as a type=value pair, where the type is one of the following: /// - /// This action is not supported by Amazon S3 on Outposts. + /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account + /// + /// * `uri` – if you are granting permissions to a predefined group + /// + /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account + /// + /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: + /// + /// * US East (N. Virginia) + /// + /// * US West (N. California) + /// + /// * US West (Oregon) + /// + /// * Asia Pacific (Singapore) + /// + /// * Asia Pacific (Sydney) + /// + /// * Asia Pacific (Tokyo) + /// + /// * Europe (Ireland) + /// + /// * South America (São Paulo) + /// + /// + /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. + /// + /// + /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: + /// + /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantRead; - /// Allows grantee to read the object ACL. + /// Specify access permissions explicitly to allows grantee to read the object ACL. + /// + /// By default, all objects are private. Only the owner has full access control. When uploading an object, you can use this header to explicitly grant access permissions to specific Amazon Web Services accounts or groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the _Amazon S3 User Guide_. 
+ /// + /// You specify each grantee as a type=value pair, where the type is one of the following: + /// + /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account + /// + /// * `uri` – if you are granting permissions to a predefined group + /// + /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account + /// + /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: + /// + /// * US East (N. Virginia) + /// + /// * US West (N. California) /// - /// This action is not supported by Amazon S3 on Outposts. + /// * US West (Oregon) + /// + /// * Asia Pacific (Singapore) + /// + /// * Asia Pacific (Sydney) + /// + /// * Asia Pacific (Tokyo) + /// + /// * Europe (Ireland) + /// + /// * South America (São Paulo) + /// + /// + /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. + /// + /// + /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: + /// + /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantReadAcp; - /// Allows grantee to write the ACL for the applicable object. + /// Specify access permissions explicitly to allow grantee to write the ACL for the applicable object. + /// + /// By default, all objects are private. Only the owner has full access control. When uploading an object, you can use this header to explicitly grant access permissions to specific Amazon Web Services accounts or groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the _Amazon S3 User Guide_. + /// + /// You specify each grantee as a type=value pair, where the type is one of the following: + /// + /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account + /// + /// * `uri` – if you are granting permissions to a predefined group + /// + /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account + /// + /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: + /// + /// * US East (N. Virginia) + /// + /// * US West (N. California) + /// + /// * US West (Oregon) + /// + /// * Asia Pacific (Singapore) + /// + /// * Asia Pacific (Sydney) + /// + /// * Asia Pacific (Tokyo) + /// + /// * Europe (Ireland) + /// + /// * South America (São Paulo) /// - /// This action is not supported by Amazon S3 on Outposts. + /// + /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference.
+ /// + /// + /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: + /// + /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantWriteAcp; /// Object key for which the multipart upload is to be initiated. @@ -291,54 +453,84 @@ abstract class CreateMultipartUploadRequest /// A map of metadata to store with the object in S3. _i3.BuiltMap? get metadata; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// + /// * For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. + /// + /// * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass? get storageClass; /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. + /// + /// This functionality is not supported for directory buckets. String? get websiteRedirectLocation; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, AES256). + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; - /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. + /// Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsEncryptionContext; /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. /// /// Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key. + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// The tag-set for the object. The tag-set must be encoded as URL Query parameters. + /// + /// This functionality is not supported for directory buckets. String? get tagging; /// Specifies the Object Lock mode that you want to apply to the uploaded object. + /// + /// This functionality is not supported for directory buckets. ObjectLockMode? get objectLockMode; /// Specifies the date and time when you want the Object Lock to expire. + /// + /// This functionality is not supported for directory buckets. DateTime? get objectLockRetainUntilDate; /// Specifies whether you want to apply a legal hold to the uploaded object. + /// + /// This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus? 
get objectLockLegalHoldStatus; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. ChecksumAlgorithm? get checksumAlgorithm; @override String labelFor(String key) { @@ -357,6 +549,7 @@ abstract class CreateMultipartUploadRequest @override CreateMultipartUploadRequestPayload getPayload() => CreateMultipartUploadRequestPayload(); + @override List get props => [ acl, @@ -390,6 +583,7 @@ abstract class CreateMultipartUploadRequest expectedBucketOwner, checksumAlgorithm, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CreateMultipartUploadRequest') @@ -533,6 +727,7 @@ abstract class CreateMultipartUploadRequestPayload @override List get props => []; + @override String toString() { final helper = @@ -553,6 +748,7 @@ class CreateMultipartUploadRequestRestXmlSerializer extends _i1 CreateMultipartUploadRequestPayload, _$CreateMultipartUploadRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -560,6 +756,7 @@ class CreateMultipartUploadRequestRestXmlSerializer extends _i1 shape: 'restXml', ) ]; + @override CreateMultipartUploadRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_input.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_input.dart index eed7d0d3b0..9070bd7bde 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_input.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_input.dart @@ -89,6 +89,7 @@ abstract class CsvInput quoteCharacter, allowQuotedRecordDelimiter, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CsvInput') @@ -133,6 +134,7 @@ class CsvInputRestXmlSerializer CsvInput, _$CsvInput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -140,6 +142,7 @@ class CsvInputRestXmlSerializer shape: 'restXml', ) ]; + @override CsvInput deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_output.dart index 930cdbb13e..262cf84685 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/csv_output.dart @@ -68,6 +68,7 @@ abstract class CsvOutput fieldDelimiter, quoteCharacter, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('CsvOutput') @@ -104,6 +105,7 @@ class CsvOutputRestXmlSerializer CsvOutput, 
_$CsvOutput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -111,6 +113,7 @@ class CsvOutputRestXmlSerializer shape: 'restXml', ) ]; + @override CsvOutput deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete.dart index 7c80d02c0c..ece0653835 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete.dart @@ -37,15 +37,18 @@ abstract class Delete ]; /// The object to delete. + /// + /// **Directory buckets** \- For directory buckets, an object that's composed entirely of whitespace characters is not supported by the `DeleteObjects` API operation. The request will receive a `400 Bad Request` error and none of the objects in the request will be deleted. _i2.BuiltList get objects; - /// Element to enable quiet mode for the request. When you add this element, you must set its value to true. + /// Element to enable quiet mode for the request. When you add this element, you must set its value to `true`. bool? get quiet; @override List get props => [ objects, quiet, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Delete') @@ -69,6 +72,7 @@ class DeleteRestXmlSerializer extends _i3.StructuredSmithySerializer { Delete, _$Delete, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -76,6 +80,7 @@ class DeleteRestXmlSerializer extends _i3.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override Delete deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_output.dart index a00441b065..27b8f41cc4 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_output.dart @@ -57,22 +57,30 @@ abstract class DeleteObjectOutput static const List<_i2.SmithySerializer> serializers = [DeleteObjectOutputRestXmlSerializer()]; - /// Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. + /// Indicates whether the specified object version that was permanently deleted was (true) or was not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or not (false) the current version of the object is a delete marker. + /// + /// This functionality is not supported for directory buckets. bool? get deleteMarker; /// Returns the version ID of the delete marker created as a result of the DELETE operation. + /// + /// This functionality is not supported for directory buckets. String? get versionId; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? 
get requestCharged; @override DeleteObjectOutputPayload getPayload() => DeleteObjectOutputPayload(); + @override List get props => [ deleteMarker, versionId, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectOutput') @@ -106,6 +114,7 @@ abstract class DeleteObjectOutputPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectOutputPayload'); @@ -124,6 +133,7 @@ class DeleteObjectOutputRestXmlSerializer DeleteObjectOutputPayload, _$DeleteObjectOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -131,6 +141,7 @@ class DeleteObjectOutputRestXmlSerializer shape: 'restXml', ) ]; + @override DeleteObjectOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_request.dart index dcd1048093..c4e9dfc31c 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_object_request.dart @@ -83,27 +83,39 @@ abstract class DeleteObjectRequest /// The bucket name of the bucket containing the object. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. 
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Key name of the object to delete. String get key; /// The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled. + /// + /// This functionality is not supported for directory buckets. String? get mfa; - /// VersionId used to reference a specific version of the object. + /// Version ID used to reference a specific version of the object. + /// + /// For directory buckets in this API operation, only the `null` value of the version ID is supported. String? get versionId; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation. To use this header, you must have the `s3:BypassGovernanceRetention` permission. + /// + /// This functionality is not supported for directory buckets. bool? get bypassGovernanceRetention; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? 
get expectedBucketOwner; @override String labelFor(String key) { @@ -121,6 +133,7 @@ abstract class DeleteObjectRequest @override DeleteObjectRequestPayload getPayload() => DeleteObjectRequestPayload(); + @override List get props => [ bucket, @@ -131,6 +144,7 @@ abstract class DeleteObjectRequest bypassGovernanceRetention, expectedBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectRequest') @@ -180,6 +194,7 @@ abstract class DeleteObjectRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectRequestPayload'); @@ -198,6 +213,7 @@ class DeleteObjectRequestRestXmlSerializer DeleteObjectRequestPayload, _$DeleteObjectRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -205,6 +221,7 @@ class DeleteObjectRequestRestXmlSerializer shape: 'restXml', ) ]; + @override DeleteObjectRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_output.dart index fdb56d5014..2bbe1df997 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_output.dart @@ -63,6 +63,8 @@ abstract class DeleteObjectsOutput _i3.BuiltList? get deleted; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; /// Container for a failed delete action that describes the object that Amazon S3 attempted to delete and the error it encountered. @@ -76,12 +78,14 @@ abstract class DeleteObjectsOutput b.errors.replace(errors!); } }); + @override List get props => [ deleted, requestCharged, errors, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectsOutput') @@ -122,6 +126,7 @@ abstract class DeleteObjectsOutputPayload deleted, errors, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectsOutputPayload') @@ -148,6 +153,7 @@ class DeleteObjectsOutputRestXmlSerializer DeleteObjectsOutputPayload, _$DeleteObjectsOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -155,6 +161,7 @@ class DeleteObjectsOutputRestXmlSerializer shape: 'restXml', ) ]; + @override DeleteObjectsOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_request.dart index 576a670bbe..c91dd069c6 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/delete_objects_request.dart @@ -83,31 +83,56 @@ abstract class DeleteObjectsRequest /// The bucket name containing the objects to delete. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. 
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Container for the request. Delete get delete; /// The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled. + /// + /// When performing the `DeleteObjects` operation on an MFA delete enabled bucket, which attempts to delete the specified versioned objects, you must include an MFA token. 
If you don't provide an MFA token, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, whether there are versioned object keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get mfa; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the `s3:BypassGovernanceRetention` permission. + /// + /// This functionality is not supported for directory buckets. bool? get bypassGovernanceRetention; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any additional functionality if not using the SDK. When sending this header, there must be a corresponding `x-amz-checksum` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding `x-amz-checksum-_algorithm_` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. /// - /// If you provide an individual checksum, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter. 
+ /// For the `x-amz-checksum-_algorithm_` header, replace `_algorithm_` with the supported algorithm from the following list: + /// + /// * CRC32 + /// + /// * CRC32C + /// + /// * SHA1 /// - /// This checksum algorithm must be the same for all parts and it match the checksum value supplied in the `CreateMultipartUpload` request. + /// * SHA256 + /// + /// + /// For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// + /// If the individual checksum value you provide through `x-amz-checksum-_algorithm_` doesn't match the checksum algorithm you set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter and uses the checksum algorithm that matches the provided value in `x-amz-checksum-_algorithm_` . + /// + /// If you provide an individual checksum, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter. ChecksumAlgorithm? get checksumAlgorithm; @override String labelFor(String key) { @@ -123,6 +148,7 @@ abstract class DeleteObjectsRequest @override Delete getPayload() => delete; + @override List get props => [ bucket, @@ -133,6 +159,7 @@ abstract class DeleteObjectsRequest expectedBucketOwner, checksumAlgorithm, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeleteObjectsRequest') @@ -177,6 +204,7 @@ class DeleteObjectsRequestRestXmlSerializer DeleteObjectsRequest, _$DeleteObjectsRequest, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -184,6 +212,7 @@ class DeleteObjectsRequestRestXmlSerializer shape: 'restXml', ) ]; + @override Delete deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/deleted_object.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/deleted_object.dart index 28bb18b805..2f333fc8e1 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/deleted_object.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/deleted_object.dart @@ -43,12 +43,18 @@ abstract class DeletedObject String? get key; /// The version ID of the deleted object. + /// + /// This functionality is not supported for directory buckets. String? get versionId; - /// Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created. + /// Indicates whether the specified object version that was permanently deleted was (true) or was not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or not (false) the current version of the object is a delete marker. + /// + /// This functionality is not supported for directory buckets. bool? get deleteMarker; /// The version ID of the delete marker created as a result of the DELETE operation. If you delete a specific object version, the value returned by this header is the version ID of the object version deleted. + /// + /// This functionality is not supported for directory buckets. String? 
get deleteMarkerVersionId; @override List get props => [ @@ -57,6 +63,7 @@ abstract class DeletedObject deleteMarker, deleteMarkerVersionId, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('DeletedObject') @@ -89,6 +96,7 @@ class DeletedObjectRestXmlSerializer DeletedObject, _$DeletedObject, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -96,6 +104,7 @@ class DeletedObjectRestXmlSerializer shape: 'restXml', ) ]; + @override DeletedObject deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/end_event.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/end_event.dart index 636b7681cd..06bcfb0d3b 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/end_event.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/end_event.dart @@ -30,6 +30,7 @@ abstract class EndEvent @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('EndEvent'); @@ -46,6 +47,7 @@ class EndEventRestXmlSerializer EndEvent, _$EndEvent, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -53,6 +55,7 @@ class EndEventRestXmlSerializer shape: 'restXml', ) ]; + @override EndEvent deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/error.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/error.dart index a5bc879ccc..b80c6cbae4 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/error.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/error.dart @@ -42,6 +42,8 @@ abstract class Error String? get key; /// The version ID of the error. + /// + /// This functionality is not supported for directory buckets. String? get versionId; /// The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type. The following is a list of Amazon S3 error codes. For more information, see [Error responses](https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). @@ -760,6 +762,7 @@ abstract class Error code, message, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Error') @@ -791,6 +794,7 @@ class ErrorRestXmlSerializer extends _i2.StructuredSmithySerializer { Error, _$Error, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -798,6 +802,7 @@ class ErrorRestXmlSerializer extends _i2.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override Error deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_output.dart index 36fbdbbb1b..daec10cbee 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_output.dart @@ -269,19 +269,29 @@ abstract class GetObjectOutput /// Object data. _i3.Stream> get body; - /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response. + /// Indicates whether the object retrieved was (true) or was not (false) a Delete Marker. 
If false, this response header does not appear in the response. + /// + /// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. + /// + /// * If the specified version in the request is a delete marker, the response returns a `405 Method Not Allowed` error and the `Last-Modified: timestamp` response header. bool? get deleteMarker; - /// Indicates that a range of bytes was specified. + /// Indicates that a range of bytes was specified in the request. String? get acceptRanges; - /// If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs providing object expiration information. The value of the `rule-id` is URL-encoded. + /// If the object expiration is configured (see [`PutBucketLifecycleConfiguration`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) ), the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs providing object expiration information. The value of the `rule-id` is URL-encoded. + /// + /// This functionality is not supported for directory buckets. String? get expiration; /// Provides information about object restoration action and expiration time of the restored object copy. + /// + /// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. String? get restore; - /// Creation date of the object. + /// Date and time when the object was last modified. + /// + /// **General purpose buckets** \- When you specify a `versionId` of the object in your request, if the specified version in the request is a delete marker, the response returns a `405 Method Not Allowed` error and the `Last-Modified: timestamp` response header. DateTime? get lastModified; /// Size of the body in bytes. @@ -290,22 +300,26 @@ abstract class GetObjectOutput /// An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. String? get eTag; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. 
For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumSha256; - /// This is set to the number of metadata entries not returned in `x-amz-meta` headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers. + /// This is set to the number of metadata entries not returned in the headers that are prefixed with `x-amz-meta-`. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers. + /// + /// This functionality is not supported for directory buckets. int? get missingMeta; - /// Version of the object. + /// Version ID of the object. + /// + /// This functionality is not supported for directory buckets. String? get versionId; /// Specifies caching behavior along the request/reply chain. @@ -314,7 +328,7 @@ abstract class GetObjectOutput /// Specifies presentational information for the object. String? get contentDisposition; - /// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. + /// Indicates what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. String? get contentEncoding; /// The language the content is in. @@ -330,51 +344,80 @@ abstract class GetObjectOutput DateTime? get expires; /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. 
+ /// + /// This functionality is not supported for directory buckets. String? get websiteRedirectLocation; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; /// A map of metadata to store with the object in S3. _i5.BuiltMap? get metadata; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. StorageClass? get storageClass; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; /// Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule. + /// + /// This functionality is not supported for directory buckets. ReplicationStatus? get replicationStatus; /// The count of parts this object has. This value is only returned if you specify `partNumber` in your request and the object was uploaded as a multipart upload. int? get partsCount; - /// The number of tags, if any, on the object. + /// The number of tags, if any, on the object, when you have the relevant permission to read object tags. + /// + /// You can use [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to retrieve the tag set associated with an object. 
+ /// + /// This functionality is not supported for directory buckets. int? get tagCount; - /// The Object Lock mode currently in place for this object. + /// The Object Lock mode that's currently in place for this object. + /// + /// This functionality is not supported for directory buckets. ObjectLockMode? get objectLockMode; /// The date and time when this object's Object Lock will expire. + /// + /// This functionality is not supported for directory buckets. DateTime? get objectLockRetainUntilDate; /// Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status. + /// + /// This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus? get objectLockLegalHoldStatus; @override _i3.Stream> getPayload() => body; + @override List get props => [ body, @@ -414,6 +457,7 @@ abstract class GetObjectOutput objectLockRetainUntilDate, objectLockLegalHoldStatus, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('GetObjectOutput') @@ -574,6 +618,7 @@ class GetObjectOutputRestXmlSerializer GetObjectOutput, _$GetObjectOutput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -581,6 +626,7 @@ class GetObjectOutputRestXmlSerializer shape: 'restXml', ) ]; + @override _i3.Stream> deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_request.dart index 97c4688aee..fe3de40be9 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/get_object_request.dart @@ -174,29 +174,49 @@ abstract class GetObjectRequest /// The bucket name containing the object. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When using an Object Lambda access point the hostname takes the form _AccessPointName_-_AccountId_.s3-object-lambda._Region_.amazonaws.com. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. 
The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Object Lambda access points** \- When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form _AccessPointName_-_AccountId_.s3-object-lambda._Region_.amazonaws.com. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; - /// Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error. + /// Return the object only if its entity tag (ETag) is the same as the one specified in this header; otherwise, return a `412 Precondition Failed` error. + /// + /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: `If-Match` condition evaluates to `true`, and; `If-Unmodified-Since` condition evaluates to `false`; then, S3 returns `200 OK` and the data requested. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). String? get ifMatch; - /// Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. + /// Return the object only if it has been modified since the specified time; otherwise, return a `304 Not Modified` error. + /// + /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: `If-None-Match` condition evaluates to `false`, and; `If-Modified-Since` condition evaluates to `true`; then, S3 returns `304 Not Modified` status code. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). DateTime? get ifModifiedSince; - /// Return the object only if its entity tag (ETag) is different from the one specified; otherwise, return a 304 (not modified) error. 
+ /// Return the object only if its entity tag (ETag) is different from the one specified in this header; otherwise, return a `304 Not Modified` error. + /// + /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: `If-None-Match` condition evaluates to `false`, and; `If-Modified-Since` condition evaluates to `true`; then, S3 returns `304 Not Modified` HTTP status code. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). String? get ifNoneMatch; - /// Return the object only if it has not been modified since the specified time; otherwise, return a 412 (precondition failed) error. + /// Return the object only if it has not been modified since the specified time; otherwise, return a `412 Precondition Failed` error. + /// + /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: `If-Match` condition evaluates to `true`, and; `If-Unmodified-Since` condition evaluates to `false`; then, S3 returns `200 OK` and the data requested. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). DateTime? get ifUnmodifiedSince; /// Key of the object to get. String get key; - /// Downloads the specified range bytes of an object. For more information about the HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range](https://www.rfc-editor.org/rfc/rfc9110.html#name-range). + /// Downloads the specified byte range of an object. For more information about the HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range](https://www.rfc-editor.org/rfc/rfc9110.html#name-range). /// /// Amazon S3 doesn't support retrieving multiple ranges of data per `GET` request. String? get range; @@ -204,7 +224,7 @@ abstract class GetObjectRequest /// Sets the `Cache-Control` header of the response. String? get responseCacheControl; - /// Sets the `Content-Disposition` header of the response + /// Sets the `Content-Disposition` header of the response. String? get responseContentDisposition; /// Sets the `Content-Encoding` header of the response. @@ -219,25 +239,77 @@ abstract class GetObjectRequest /// Sets the `Expires` header of the response. DateTime? get responseExpires; - /// VersionId used to reference a specific version of the object. + /// Version ID used to reference a specific version of the object. + /// + /// By default, the `GetObject` operation returns the current version of an object. To return a different version, use the `versionId` subresource. + /// + /// * If you include a `versionId` in your request header, you must have the `s3:GetObjectVersion` permission to access a specific version of an object. The `s3:GetObject` permission is not required in this scenario. + /// + /// * If you request the current version of an object without a specific `versionId` in the request header, only the `s3:GetObject` permission is required. The `s3:GetObjectVersion` permission is not required in this scenario. + /// + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. + /// + /// + /// For more information about versioning, see [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). String? 
get versionId; - /// Specifies the algorithm to use to when decrypting the object (for example, AES256). + /// Specifies the algorithm to use when decrypting the object (for example, `AES256`). + /// + /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: + /// + /// * `x-amz-server-side-encryption-customer-algorithm` + /// + /// * `x-amz-server-side-encryption-customer-key` + /// + /// * `x-amz-server-side-encryption-customer-key-MD5` + /// + /// + /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// Specifies the customer-provided encryption key that you originally provided for Amazon S3 to encrypt the data before storing it. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// + /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: + /// + /// * `x-amz-server-side-encryption-customer-algorithm` + /// + /// * `x-amz-server-side-encryption-customer-key` + /// + /// * `x-amz-server-side-encryption-customer-key-MD5` + /// + /// + /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; - /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: + /// + /// * `x-amz-server-side-encryption-customer-algorithm` + /// + /// * `x-amz-server-side-encryption-customer-key` + /// + /// * `x-amz-server-side-encryption-customer-key-MD5` + /// + /// + /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. 
String? get sseCustomerKeyMd5; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object. int? get partNumber; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; /// To retrieve the checksum, this mode must be enabled. @@ -258,6 +330,7 @@ abstract class GetObjectRequest @override GetObjectRequestPayload getPayload() => GetObjectRequestPayload(); + @override List get props => [ bucket, @@ -282,6 +355,7 @@ abstract class GetObjectRequest expectedBucketOwner, checksumMode, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('GetObjectRequest') @@ -387,6 +461,7 @@ abstract class GetObjectRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('GetObjectRequestPayload'); @@ -405,6 +480,7 @@ class GetObjectRequestRestXmlSerializer GetObjectRequestPayload, _$GetObjectRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -412,6 +488,7 @@ class GetObjectRequestRestXmlSerializer shape: 'restXml', ) ]; + @override GetObjectRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_output.dart index 88b53f9e9c..e33bc5f1c9 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_output.dart @@ -254,12 +254,16 @@ abstract class HeadObjectOutput [HeadObjectOutputRestXmlSerializer()]; /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response. + /// + /// This functionality is not supported for directory buckets. bool? get deleteMarker; /// Indicates that a range of bytes was specified. String? 
get acceptRanges; - /// If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs providing object expiration information. The value of the `rule-id` is URL-encoded. + /// If the object expiration is configured (see [`PutBucketLifecycleConfiguration`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) ), the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs providing object expiration information. The value of the `rule-id` is URL-encoded. + /// + /// This functionality is not supported for directory buckets. String? get expiration; /// If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) or an archive copy is already restored. @@ -271,36 +275,44 @@ abstract class HeadObjectOutput /// If the object restoration is in progress, the header returns the value `ongoing-request="true"`. /// /// For more information about archiving objects, see [Transitioning Objects: General Considerations](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + /// + /// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. String? get restore; /// The archive state of the head object. + /// + /// This functionality is not supported for directory buckets. ArchiveStatus? get archiveStatus; - /// Creation date of the object. + /// Date and time when the object was last modified. DateTime? get lastModified; /// Size of the body in bytes. _i3.Int64? get contentLength; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; /// An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. String? get eTag; /// This is set to the number of metadata entries not returned in `x-amz-meta` headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers. + /// + /// This functionality is not supported for directory buckets. int? get missingMeta; - /// Version of the object. + /// Version ID of the object. + /// + /// This functionality is not supported for directory buckets. String? get versionId; /// Specifies caching behavior along the request/reply chain. 
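// Usage sketch: the checksum fields above are populated only when the object
// was uploaded with a checksum, and for multipart uploads they are derived
// from the per-part checksums rather than the full object. Reading them via
// HeadObject might look like the following, assuming the generated S3Client
// exposes headObject() with the usual SmithyOperation `result` future and
// that ChecksumMode.enabled is the generated enum value for ENABLED.
Future<void> printChecksums(S3Client client, String bucket, String key) async {
  final output = await client
      .headObject(HeadObjectRequest((b) => b
        ..bucket = bucket
        ..key = key
        // The checksum headers are only returned when checksum mode is
        // enabled (see `checksumMode` on HeadObjectRequest in this change).
        ..checksumMode = ChecksumMode.enabled))
      .result;
  print('etag:   ${output.eTag}');
  print('crc32:  ${output.checksumCrc32}');
  print('sha256: ${output.checksumSha256}');
  print('parts:  ${output.partsCount}'); // set only for multipart uploads
}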
@@ -309,7 +321,7 @@ abstract class HeadObjectOutput /// Specifies presentational information for the object. String? get contentDisposition; - /// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. + /// Indicates what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. String? get contentEncoding; /// The language the content is in. @@ -322,32 +334,48 @@ abstract class HeadObjectOutput DateTime? get expires; /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. + /// + /// This functionality is not supported for directory buckets. String? get websiteRedirectLocation; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; /// A map of metadata to store with the object in S3. _i4.BuiltMap? get metadata; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. /// /// For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. StorageClass? 
get storageClass; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; /// Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule. @@ -364,21 +392,30 @@ abstract class HeadObjectOutput /// /// /// For more information, see [Replication](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + /// + /// This functionality is not supported for directory buckets. ReplicationStatus? get replicationStatus; /// The count of parts this object has. This value is only returned if you specify `partNumber` in your request and the object was uploaded as a multipart upload. int? get partsCount; /// The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the `s3:GetObjectRetention` permission. For more information about S3 Object Lock, see [Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + /// + /// This functionality is not supported for directory buckets. ObjectLockMode? get objectLockMode; /// The date and time when the Object Lock retention period expires. This header is only returned if the requester has the `s3:GetObjectRetention` permission. + /// + /// This functionality is not supported for directory buckets. DateTime? get objectLockRetainUntilDate; /// Specifies whether a legal hold is in effect for this object. This header is only returned if the requester has the `s3:GetObjectLegalHold` permission. This header is not returned if the specified version of this object has never had a legal hold applied. For more information about S3 Object Lock, see [Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + /// + /// This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus? get objectLockLegalHoldStatus; @override HeadObjectOutputPayload getPayload() => HeadObjectOutputPayload(); + @override List get props => [ deleteMarker, @@ -416,6 +453,7 @@ abstract class HeadObjectOutput objectLockRetainUntilDate, objectLockLegalHoldStatus, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('HeadObjectOutput') @@ -573,6 +611,7 @@ abstract class HeadObjectOutputPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('HeadObjectOutputPayload'); @@ -591,6 +630,7 @@ class HeadObjectOutputRestXmlSerializer HeadObjectOutputPayload, _$HeadObjectOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -598,6 +638,7 @@ class HeadObjectOutputRestXmlSerializer shape: 'restXml', ) ]; + @override HeadObjectOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_request.dart index 35594f7af5..3ab4e24dac 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/head_object_request.dart @@ -134,23 +134,71 @@ abstract class HeadObjectRequest static const List<_i1.SmithySerializer> serializers = [HeadObjectRequestRestXmlSerializer()]; - /// The name of the bucket containing the object. 
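// Usage sketch: the GetObjectRequest documentation above and the
// HeadObjectRequest documentation below repeat the same SSE-C contract: the
// algorithm, the customer-provided key, and the key's MD5 digest must all be
// supplied together. One way to fill those three fields is sketched here. It
// uses package:crypto for the MD5 digest (an assumption, not a dependency
// this change introduces), and S3 expects the key and digest base64-encoded
// on the wire; verify how this SDK serializes the fields before relying on
// the exact encoding shown.
import 'dart:convert';
import 'dart:typed_data';
import 'package:crypto/crypto.dart' as crypto;

HeadObjectRequest headWithSseC(String bucket, String key, Uint8List sseKey) {
  final encodedKey = base64Encode(sseKey);
  final keyMd5 = base64Encode(crypto.md5.convert(sseKey).bytes);
  return HeadObjectRequest((b) => b
    ..bucket = bucket
    ..key = key
    ..sseCustomerAlgorithm = 'AES256'
    ..sseCustomerKey = encodedKey
    ..sseCustomerKeyMd5 = keyMd5);
}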
+ /// The name of the bucket that contains the object. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error. 
+ /// + /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: + /// + /// * `If-Match` condition evaluates to `true`, and; + /// + /// * `If-Unmodified-Since` condition evaluates to `false`; + /// + /// + /// Then Amazon S3 returns `200 OK` and the data requested. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). String? get ifMatch; /// Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. + /// + /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: + /// + /// * `If-None-Match` condition evaluates to `false`, and; + /// + /// * `If-Modified-Since` condition evaluates to `true`; + /// + /// + /// Then Amazon S3 returns the `304 Not Modified` response code. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). DateTime? get ifModifiedSince; /// Return the object only if its entity tag (ETag) is different from the one specified; otherwise, return a 304 (not modified) error. + /// + /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: + /// + /// * `If-None-Match` condition evaluates to `false`, and; + /// + /// * `If-Modified-Since` condition evaluates to `true`; + /// + /// + /// Then Amazon S3 returns the `304 Not Modified` response code. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). String? get ifNoneMatch; /// Return the object only if it has not been modified since the specified time; otherwise, return a 412 (precondition failed) error. + /// + /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: + /// + /// * `If-Match` condition evaluates to `true`, and; + /// + /// * `If-Unmodified-Since` condition evaluates to `false`; + /// + /// + /// Then Amazon S3 returns `200 OK` and the data requested. + /// + /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). DateTime? get ifUnmodifiedSince; /// The object key. @@ -159,25 +207,35 @@ abstract class HeadObjectRequest /// HeadObject returns only the metadata for an object. If the Range is satisfiable, only the `ContentLength` is affected in the response. If the Range is not satisfiable, S3 returns a `416 - Requested Range Not Satisfiable` error. String? get range; - /// VersionId used to reference a specific version of the object. + /// Version ID used to reference a specific version of the object. + /// + /// For directory buckets in this API operation, only the `null` value of the version ID is supported. String? get versionId; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, AES256). + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// + /// This functionality is not supported for directory buckets. String? 
get sseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. int? get partNumber; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; /// To retrieve the checksum, this parameter must be enabled. @@ -200,6 +258,7 @@ abstract class HeadObjectRequest @override HeadObjectRequestPayload getPayload() => HeadObjectRequestPayload(); + @override List get props => [ bucket, @@ -218,6 +277,7 @@ abstract class HeadObjectRequest expectedBucketOwner, checksumMode, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('HeadObjectRequest') @@ -299,6 +359,7 @@ abstract class HeadObjectRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('HeadObjectRequestPayload'); @@ -317,6 +378,7 @@ class HeadObjectRequestRestXmlSerializer HeadObjectRequestPayload, _$HeadObjectRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -324,6 +386,7 @@ class HeadObjectRequestRestXmlSerializer shape: 'restXml', ) ]; + @override HeadObjectRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/initiator.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/initiator.dart index 5a3d3401cd..542e8e58cb 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/initiator.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/initiator.dart @@ -36,15 +36,20 @@ abstract class Initiator ]; /// If the principal is an Amazon Web Services account, it provides the Canonical User ID. 
If the principal is an IAM User, it provides a user ARN value. + /// + /// **Directory buckets** \- If the principal is an Amazon Web Services account, it provides the Amazon Web Services account ID. If the principal is an IAM User, it provides a user ARN value. String? get id; /// Name of the Principal. + /// + /// This functionality is not supported for directory buckets. String? get displayName; @override List get props => [ id, displayName, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Initiator') @@ -69,6 +74,7 @@ class InitiatorRestXmlSerializer Initiator, _$Initiator, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -76,6 +82,7 @@ class InitiatorRestXmlSerializer shape: 'restXml', ) ]; + @override Initiator deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/input_serialization.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/input_serialization.dart index 94fd2eb473..3bd6698f69 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/input_serialization.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/input_serialization.dart @@ -62,6 +62,7 @@ abstract class InputSerialization json, parquet, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('InputSerialization') @@ -94,6 +95,7 @@ class InputSerializationRestXmlSerializer InputSerialization, _$InputSerialization, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -101,6 +103,7 @@ class InputSerializationRestXmlSerializer shape: 'restXml', ) ]; + @override InputSerialization deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.dart index 4403716e60..e2be8e1000 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.dart @@ -13,12 +13,16 @@ import 'package:smithy/smithy.dart' as _i2; part 'invalid_object_state.g.dart'; /// Object is archived and inaccessible until restored. +/// +/// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. abstract class InvalidObjectState with _i1.AWSEquatable implements Built, _i2.SmithyHttpException { /// Object is archived and inaccessible until restored. 
+ /// + /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. factory InvalidObjectState({ StorageClass? storageClass, IntelligentTieringAccessTier? accessTier, @@ -30,6 +34,8 @@ abstract class InvalidObjectState } /// Object is archived and inaccessible until restored. + /// + /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. factory InvalidObjectState.build( [void Function(InvalidObjectStateBuilder) updates]) = _$InvalidObjectState; @@ -42,7 +48,6 @@ abstract class InvalidObjectState _i1.AWSBaseHttpResponse response, ) => payload.rebuild((b) { - b.statusCode = response.statusCode; b.headers = response.headers; }); @@ -57,23 +62,29 @@ abstract class InvalidObjectState namespace: 'com.amazonaws.s3', shape: 'InvalidObjectState', ); + @override String? get message => null; + @override _i2.RetryConfig? get retryConfig => null; + @override @BuiltValueField(compare: false) - int? get statusCode; + int get statusCode => 403; + @override @BuiltValueField(compare: false) Map? get headers; @override Exception? get underlyingException => null; + @override List get props => [ storageClass, accessTier, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('InvalidObjectState') @@ -98,6 +109,7 @@ class InvalidObjectStateRestXmlSerializer InvalidObjectState, _$InvalidObjectState, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -105,6 +117,7 @@ class InvalidObjectStateRestXmlSerializer shape: 'restXml', ) ]; + @override InvalidObjectState deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.g.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.g.dart index dcbc504850..0631aad4ce 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.g.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/invalid_object_state.g.dart @@ -12,16 +12,13 @@ class _$InvalidObjectState extends InvalidObjectState { @override final IntelligentTieringAccessTier? accessTier; @override - final int? statusCode; - @override final Map? headers; factory _$InvalidObjectState( [void Function(InvalidObjectStateBuilder)? 
updates]) => (new InvalidObjectStateBuilder()..update(updates))._build(); - _$InvalidObjectState._( - {this.storageClass, this.accessTier, this.statusCode, this.headers}) + _$InvalidObjectState._({this.storageClass, this.accessTier, this.headers}) : super._(); @override @@ -65,10 +62,6 @@ class InvalidObjectStateBuilder set accessTier(IntelligentTieringAccessTier? accessTier) => _$this._accessTier = accessTier; - int? _statusCode; - int? get statusCode => _$this._statusCode; - set statusCode(int? statusCode) => _$this._statusCode = statusCode; - Map? _headers; Map? get headers => _$this._headers; set headers(Map? headers) => _$this._headers = headers; @@ -80,7 +73,6 @@ class InvalidObjectStateBuilder if ($v != null) { _storageClass = $v.storageClass; _accessTier = $v.accessTier; - _statusCode = $v.statusCode; _headers = $v.headers; _$v = null; } @@ -106,7 +98,6 @@ class InvalidObjectStateBuilder new _$InvalidObjectState._( storageClass: storageClass, accessTier: accessTier, - statusCode: statusCode, headers: headers); replace(_$result); return _$result; diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_input.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_input.dart index 43815e583d..6022be21af 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_input.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_input.dart @@ -34,6 +34,7 @@ abstract class JsonInput JsonType? get type; @override List get props => [type]; + @override String toString() { final helper = newBuiltValueToStringHelper('JsonInput') @@ -54,6 +55,7 @@ class JsonInputRestXmlSerializer JsonInput, _$JsonInput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -61,6 +63,7 @@ class JsonInputRestXmlSerializer shape: 'restXml', ) ]; + @override JsonInput deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_output.dart index 5516b9edab..6654754b8f 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/json_output.dart @@ -33,6 +33,7 @@ abstract class JsonOutput String? get recordDelimiter; @override List get props => [recordDelimiter]; + @override String toString() { final helper = newBuiltValueToStringHelper('JsonOutput') @@ -53,6 +54,7 @@ class JsonOutputRestXmlSerializer JsonOutput, _$JsonOutput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -60,6 +62,7 @@ class JsonOutputRestXmlSerializer shape: 'restXml', ) ]; + @override JsonOutput deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_output.dart index 632d029392..a2cf711c40 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_output.dart @@ -98,18 +98,26 @@ abstract class ListMultipartUploadsOutput String? get keyMarker; /// Upload ID after which listing began. + /// + /// This functionality is not supported for directory buckets. String? 
get uploadIdMarker; /// When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request. String? get nextKeyMarker; /// When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; /// Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response. + /// + /// **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. String? get delimiter; /// When a list is truncated, this element specifies the value that should be used for the `upload-id-marker` request parameter in a subsequent request. + /// + /// This functionality is not supported for directory buckets. String? get nextUploadIdMarker; /// Maximum number of multipart uploads that could have been included in the response. @@ -122,6 +130,8 @@ abstract class ListMultipartUploadsOutput _i3.BuiltList? get uploads; /// If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a `CommonPrefixes` element. The distinct key prefixes are returned in the `Prefix` child element. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. _i3.BuiltList? get commonPrefixes; /// Encoding type used by Amazon S3 to encode object keys in the response. @@ -132,6 +142,8 @@ abstract class ListMultipartUploadsOutput EncodingType? get encodingType; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override ListMultipartUploadsOutputPayload getPayload() => @@ -153,6 +165,7 @@ abstract class ListMultipartUploadsOutput b.uploads.replace(uploads!); } }); + @override List get props => [ bucket, @@ -169,6 +182,7 @@ abstract class ListMultipartUploadsOutput encodingType, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListMultipartUploadsOutput') @@ -245,9 +259,13 @@ abstract class ListMultipartUploadsOutputPayload String? get bucket; /// If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a `CommonPrefixes` element. The distinct key prefixes are returned in the `Prefix` child element. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. _i3.BuiltList? get commonPrefixes; /// Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response. + /// + /// **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. String? get delimiter; /// Encoding type used by Amazon S3 to encode object keys in the response. @@ -270,12 +288,18 @@ abstract class ListMultipartUploadsOutputPayload String? get nextKeyMarker; /// When a list is truncated, this element specifies the value that should be used for the `upload-id-marker` request parameter in a subsequent request. + /// + /// This functionality is not supported for directory buckets. String? 
get nextUploadIdMarker; /// When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; /// Upload ID after which listing began. + /// + /// This functionality is not supported for directory buckets. String? get uploadIdMarker; /// Container for elements related to a particular multipart upload. A response can contain zero or more `Upload` elements. @@ -295,6 +319,7 @@ abstract class ListMultipartUploadsOutputPayload uploadIdMarker, uploads, ]; + @override String toString() { final helper = @@ -363,6 +388,7 @@ class ListMultipartUploadsOutputRestXmlSerializer ListMultipartUploadsOutputPayload, _$ListMultipartUploadsOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -370,6 +396,7 @@ class ListMultipartUploadsOutputRestXmlSerializer shape: 'restXml', ) ]; + @override ListMultipartUploadsOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_request.dart index a893c25549..4027d35a37 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_multipart_uploads_request.dart @@ -94,39 +94,57 @@ abstract class ListMultipartUploadsRequest /// The name of the bucket to which the multipart upload was initiated. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. 
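// Usage sketch: the NextKeyMarker / NextUploadIdMarker fields documented
// above feed straight back into the key-marker / upload-id-marker request
// parameters on the next page. A rough pagination loop follows, assuming the
// generated S3Client exposes listMultipartUploads() with the usual
// SmithyOperation `result` future. Per the notes above, upload-id-marker is
// not supported for directory buckets, where only key-marker is used.
Future<List<MultipartUpload>> listAllUploads(
    S3Client client, String bucket) async {
  final uploads = <MultipartUpload>[];
  String? keyMarker;
  String? uploadIdMarker;
  while (true) {
    final page = await client
        .listMultipartUploads(ListMultipartUploadsRequest((b) => b
          ..bucket = bucket
          ..keyMarker = keyMarker
          ..uploadIdMarker = uploadIdMarker))
        .result;
    if (page.uploads != null) {
      uploads.addAll(page.uploads!);
    }
    if (page.isTruncated != true) break;
    keyMarker = page.nextKeyMarker;
    uploadIdMarker = page.nextUploadIdMarker;
  }
  return uploads;
}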
+ /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Character you use to group keys. /// /// All keys that contain the same string between the prefix, if specified, and the first occurrence of the delimiter after the prefix are grouped under a single result element, `CommonPrefixes`. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under `CommonPrefixes` result element are not returned elsewhere in the response. + /// + /// **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. String? get delimiter; /// Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key can contain any Unicode character; however, the XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. EncodingType? get encodingType; - /// Together with `upload-id-marker`, this parameter specifies the multipart upload after which listing should begin. + /// Specifies the multipart upload after which listing should begin. + /// + /// * **General purpose buckets** \- For general purpose buckets, `key-marker` is an object key. Together with `upload-id-marker`, this parameter specifies the multipart upload after which listing should begin. /// - /// If `upload-id-marker` is not specified, only the keys lexicographically greater than the specified `key-marker` will be included in the list. + /// If `upload-id-marker` is not specified, only the keys lexicographically greater than the specified `key-marker` will be included in the list. /// - /// If `upload-id-marker` is specified, any multipart uploads for a key equal to the `key-marker` might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified `upload-id-marker`. 
+ /// If `upload-id-marker` is specified, any multipart uploads for a key equal to the `key-marker` might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified `upload-id-marker`. + /// + /// * **Directory buckets** \- For directory buckets, `key-marker` is obfuscated and isn't a real object key. The `upload-id-marker` parameter isn't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of `key-marker` to the `NextKeyMarker` value from the previous response. + /// + /// In the `ListMultipartUploads` response, the multipart uploads aren't sorted lexicographically based on the object keys. String? get keyMarker; /// Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response. int? get maxUploads; /// Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. (You can think of using `prefix` to make groups in the same way that you'd use a folder in a file system.) + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; /// Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified `upload-id-marker`. + /// + /// This functionality is not supported for directory buckets. String? get uploadIdMarker; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? 
get requestPayer; @override String labelFor(String key) { @@ -143,6 +161,7 @@ abstract class ListMultipartUploadsRequest @override ListMultipartUploadsRequestPayload getPayload() => ListMultipartUploadsRequestPayload(); + @override List get props => [ bucket, @@ -155,6 +174,7 @@ abstract class ListMultipartUploadsRequest expectedBucketOwner, requestPayer, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListMultipartUploadsRequest') @@ -214,6 +234,7 @@ abstract class ListMultipartUploadsRequestPayload @override List get props => []; + @override String toString() { final helper = @@ -234,6 +255,7 @@ class ListMultipartUploadsRequestRestXmlSerializer ListMultipartUploadsRequestPayload, _$ListMultipartUploadsRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -241,6 +263,7 @@ class ListMultipartUploadsRequestRestXmlSerializer shape: 'restXml', ) ]; + @override ListMultipartUploadsRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_output.dart index 97b7005841..0df9a60dc1 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_output.dart @@ -98,22 +98,22 @@ abstract class ListObjectsV2Output _i3.BuiltList? get contents; /// The bucket name. - /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String? get name; /// Keys that begin with the indicated prefix. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; /// Causes keys that contain the same string between the `prefix` and the first occurrence of the delimiter to be rolled up into a single result element in the `CommonPrefixes` collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the `MaxKeys` value. + /// + /// **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. String? get delimiter; /// Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. int? 
get maxKeys; - /// All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns. + /// All of the keys (up to 1,000) that share the same prefix are grouped together. When counting the total number of returns by this API operation, this group of keys is considered as one item. /// /// A response can contain `CommonPrefixes` only if you specify a delimiter. /// @@ -122,6 +122,10 @@ abstract class ListObjectsV2Output /// `CommonPrefixes` lists keys that act like subdirectories in the directory specified by `Prefix`. /// /// For example, if the prefix is `notes/` and the delimiter is a slash (`/`) as in `notes/summer/july`, the common prefix is `notes/summer/`. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns. + /// + /// * **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. + /// + /// * **Directory buckets** \- When you query `ListObjectsV2` with a delimiter during in-progress multipart uploads, the `CommonPrefixes` response parameter contains the prefixes that are associated with the in-progress multipart uploads. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. _i3.BuiltList? get commonPrefixes; /// Encoding type used by Amazon S3 to encode object key names in the XML response. @@ -134,16 +138,20 @@ abstract class ListObjectsV2Output /// `KeyCount` is the number of keys returned with this request. `KeyCount` will always be less than or equal to the `MaxKeys` field. For example, if you ask for 50 keys, your result will include 50 keys or fewer. int? get keyCount; - /// If `ContinuationToken` was sent with the request, it is included in the response. + /// If `ContinuationToken` was sent with the request, it is included in the response. You can use the returned `ContinuationToken` for pagination of the list results. String? get continuationToken; /// `NextContinuationToken` is sent when `isTruncated` is true, which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this `NextContinuationToken`. `NextContinuationToken` is obfuscated and is not a real key String? get nextContinuationToken; /// If StartAfter was sent with the request, it is included in the response. + /// + /// This functionality is not supported for directory buckets. String? get startAfter; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override ListObjectsV2OutputPayload getPayload() => ListObjectsV2OutputPayload((b) { @@ -164,6 +172,7 @@ abstract class ListObjectsV2Output b.prefix = prefix; b.startAfter = startAfter; }); + @override List get props => [ isTruncated, @@ -180,6 +189,7 @@ abstract class ListObjectsV2Output startAfter, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListObjectsV2Output') @@ -250,7 +260,7 @@ abstract class ListObjectsV2OutputPayload const ListObjectsV2OutputPayload._(); - /// All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.
+ /// All of the keys (up to 1,000) that share the same prefix are grouped together. When counting the total number of returns by this API operation, this group of keys is considered as one item. /// /// A response can contain `CommonPrefixes` only if you specify a delimiter. /// @@ -259,15 +269,21 @@ abstract class ListObjectsV2OutputPayload /// `CommonPrefixes` lists keys that act like subdirectories in the directory specified by `Prefix`. /// /// For example, if the prefix is `notes/` and the delimiter is a slash (`/`) as in `notes/summer/july`, the common prefix is `notes/summer/`. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns. + /// + /// * **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. + /// + /// * **Directory buckets** \- When you query `ListObjectsV2` with a delimiter during in-progress multipart uploads, the `CommonPrefixes` response parameter contains the prefixes that are associated with the in-progress multipart uploads. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. _i3.BuiltList? get commonPrefixes; /// Metadata about each object returned. _i3.BuiltList? get contents; - /// If `ContinuationToken` was sent with the request, it is included in the response. + /// If `ContinuationToken` was sent with the request, it is included in the response. You can use the returned `ContinuationToken` for pagination of the list results. String? get continuationToken; /// Causes keys that contain the same string between the `prefix` and the first occurrence of the delimiter to be rolled up into a single result element in the `CommonPrefixes` collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the `MaxKeys` value. + /// + /// **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. String? get delimiter; /// Encoding type used by Amazon S3 to encode object key names in the XML response. @@ -287,19 +303,19 @@ abstract class ListObjectsV2OutputPayload int? get maxKeys; /// The bucket name. - /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. - /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String?
get name; /// `NextContinuationToken` is sent when `isTruncated` is true, which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this `NextContinuationToken`. `NextContinuationToken` is obfuscated and is not a real key String? get nextContinuationToken; /// Keys that begin with the indicated prefix. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; /// If StartAfter was sent with the request, it is included in the response. + /// + /// This functionality is not supported for directory buckets. String? get startAfter; @override List get props => [ @@ -316,6 +332,7 @@ abstract class ListObjectsV2OutputPayload prefix, startAfter, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListObjectsV2OutputPayload') @@ -382,6 +399,7 @@ class ListObjectsV2OutputRestXmlSerializer ListObjectsV2OutputPayload, _$ListObjectsV2OutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -389,6 +407,7 @@ class ListObjectsV2OutputRestXmlSerializer shape: 'restXml', ) ]; + @override ListObjectsV2OutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_request.dart index 73a7d5dc7e..2e537614ba 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_objects_v2_request.dart @@ -108,14 +108,20 @@ abstract class ListObjectsV2Request static const List<_i1.SmithySerializer> serializers = [ListObjectsV2RequestRestXmlSerializer()]; - /// Bucket name to list. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. 
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// A delimiter is a character that you use to group keys. + /// + /// * **Directory buckets** \- For directory buckets, `/` is the only supported delimiter. + /// + /// * **Directory buckets** \- When you query `ListObjectsV2` with a delimiter during in-progress multipart uploads, the `CommonPrefixes` response parameter contains the prefixes that are associated with the in-progress multipart uploads. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. String? get delimiter; /// Encoding type used by Amazon S3 to encode object keys in the response. @@ -125,24 +131,34 @@ abstract class ListObjectsV2Request int? get maxKeys; /// Limits the response to keys that begin with the specified prefix. + /// + /// **Directory buckets** \- For directory buckets, only prefixes that end in a delimiter (`/`) are supported. String? get prefix; - /// `ContinuationToken` indicates to Amazon S3 that the list is being continued on this bucket with a token. `ContinuationToken` is obfuscated and is not a real key. + /// `ContinuationToken` indicates to Amazon S3 that the list is being continued on this bucket with a token. `ContinuationToken` is obfuscated and is not a real key. You can use this `ContinuationToken` for pagination of the list results. String? get continuationToken; /// The owner field is not present in `ListObjectsV2` by default. If you want to return the owner field with each key in the result, then set the `FetchOwner` field to `true`. + /// + /// **Directory buckets** \- For directory buckets, the bucket owner is returned as the object owner for all objects. bool? get fetchOwner; /// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket. 
+ /// + /// This functionality is not supported for directory buckets. String? get startAfter; /// Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; /// Specifies the optional fields that you want returned in the response. Fields that you do not specify are not returned. + /// + /// This functionality is not supported for directory buckets. _i3.BuiltList? get optionalObjectAttributes; @override String labelFor(String key) { @@ -158,6 +174,7 @@ abstract class ListObjectsV2Request @override ListObjectsV2RequestPayload getPayload() => ListObjectsV2RequestPayload(); + @override List get props => [ bucket, @@ -172,6 +189,7 @@ abstract class ListObjectsV2Request expectedBucketOwner, optionalObjectAttributes, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListObjectsV2Request') @@ -237,6 +255,7 @@ abstract class ListObjectsV2RequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('ListObjectsV2RequestPayload'); @@ -255,6 +274,7 @@ class ListObjectsV2RequestRestXmlSerializer ListObjectsV2RequestPayload, _$ListObjectsV2RequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -262,6 +282,7 @@ class ListObjectsV2RequestRestXmlSerializer shape: 'restXml', ) ]; + @override ListObjectsV2RequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_output.dart index 188d208d06..44e1e1e165 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_output.dart @@ -109,9 +109,13 @@ abstract class ListPartsOutput /// If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, then the response includes this header indicating when the initiated multipart upload will become eligible for abort operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). /// /// The response will also include the `x-amz-abort-rule-id` header that will provide the ID of the lifecycle configuration rule that defines this action. + /// + /// This functionality is not supported for directory buckets. DateTime? get abortDate; /// This header is returned along with the `x-amz-abort-date` header. It identifies applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads. + /// + /// This functionality is not supported for directory buckets. String? 
get abortRuleId; /// The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used. @@ -142,12 +146,18 @@ abstract class ListPartsOutput Initiator? get initiator; /// Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name. + /// + /// **Directory buckets** \- The bucket owner is returned as the object owner for all the parts. Owner? get owner; - /// Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object. + /// The class of storage used to store the uploaded object. + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. StorageClass? get storageClass; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; /// The algorithm that was used to create a checksum of the object. @@ -173,6 +183,7 @@ abstract class ListPartsOutput b.storageClass = storageClass; b.uploadId = uploadId; }); + @override List get props => [ abortDate, @@ -191,6 +202,7 @@ abstract class ListPartsOutput requestCharged, checksumAlgorithm, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListPartsOutput') @@ -290,6 +302,8 @@ abstract class ListPartsOutputPayload String? get nextPartNumberMarker; /// Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name. + /// + /// **Directory buckets** \- The bucket owner is returned as the object owner for all the parts. Owner? get owner; /// When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request. @@ -298,7 +312,9 @@ abstract class ListPartsOutputPayload /// Container for elements related to a particular part. A response can contain zero or more `Part` elements. _i3.BuiltList? get parts; - /// Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object. + /// The class of storage used to store the uploaded object. + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. StorageClass? get storageClass; /// Upload ID identifying the multipart upload whose parts are being listed. 
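The ListParts hunks above spell out the pagination contract: when a listing is truncated, the `NextPartNumberMarker` on the output is the `part-number-marker` value to send on the follow-up request. Below is a minimal consumer-side sketch of that loop using the regenerated models in this patch; the barrel import, the `.build` factory, and the `partNumberMarker`/`isTruncated` members are assumptions based on the usual smithy-dart output for this package rather than something shown in these hunks, and the `S3Client.listParts` call itself is left out.

```dart
// Hypothetical pagination helpers (a sketch, not part of the generated code).
// Assumes this package's barrel import, the `.build` factory on requests, and
// the standard ListParts members (`partNumberMarker`, `isTruncated`) that the
// hunks above do not show directly.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

/// Builds the request for the first page of parts of one multipart upload.
ListPartsRequest firstPartsPage({
  required String bucket,
  required String key,
  required String uploadId,
}) =>
    ListPartsRequest.build((b) {
      b
        ..bucket = bucket
        ..key = key
        ..uploadId = uploadId;
    });

/// Builds the request for the next page, or returns `null` once the listing
/// is complete.
ListPartsRequest? nextPartsPage(ListPartsRequest current, ListPartsOutput page) {
  if (page.isTruncated != true) return null;
  // NextPartNumberMarker is the part-number-marker for the subsequent request.
  return current.rebuild((b) => b..partNumberMarker = page.nextPartNumberMarker);
}
```

A caller would keep issuing `listParts` with each returned request and accumulate `page.parts` until `nextPartsPage` returns `null`.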
@@ -318,6 +334,7 @@ abstract class ListPartsOutputPayload storageClass, uploadId, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListPartsOutputPayload') @@ -384,6 +401,7 @@ class ListPartsOutputRestXmlSerializer ListPartsOutputPayload, _$ListPartsOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -391,6 +409,7 @@ class ListPartsOutputRestXmlSerializer shape: 'restXml', ) ]; + @override ListPartsOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_request.dart index a72ff67c3b..f391218cc2 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/list_parts_request.dart @@ -103,9 +103,13 @@ abstract class ListPartsRequest /// The name of the bucket to which the parts are being uploaded. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Object key for which the multipart upload was initiated. @@ -120,19 +124,27 @@ abstract class ListPartsRequest /// Upload ID identifying the multipart upload whose parts are being listed. String get uploadId; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; /// The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; /// The MD5 server-side encryption (SSE) customer managed key. 
This parameter is needed only when the object was created using a checksum algorithm. For more information, see [Protecting data using SSE-C keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; @override String labelFor(String key) { @@ -150,6 +162,7 @@ abstract class ListPartsRequest @override ListPartsRequestPayload getPayload() => ListPartsRequestPayload(); + @override List get props => [ bucket, @@ -163,6 +176,7 @@ abstract class ListPartsRequest sseCustomerKey, sseCustomerKeyMd5, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ListPartsRequest') @@ -224,6 +238,7 @@ abstract class ListPartsRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('ListPartsRequestPayload'); @@ -242,6 +257,7 @@ class ListPartsRequestRestXmlSerializer ListPartsRequestPayload, _$ListPartsRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -249,6 +265,7 @@ class ListPartsRequestRestXmlSerializer shape: 'restXml', ) ]; + @override ListPartsRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/multipart_upload.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/multipart_upload.dart index 3ebb7b2d17..513527b9cf 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/multipart_upload.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/multipart_upload.dart @@ -59,9 +59,13 @@ abstract class MultipartUpload DateTime? get initiated; /// The class of storage used to store the object. + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. StorageClass? get storageClass; /// Specifies the owner of the object that is part of the multipart upload. + /// + /// **Directory buckets** \- The bucket owner is returned as the object owner for all the objects. Owner? get owner; /// Identifies who initiated the multipart upload. @@ -79,6 +83,7 @@ abstract class MultipartUpload initiator, checksumAlgorithm, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('MultipartUpload') @@ -123,6 +128,7 @@ class MultipartUploadRestXmlSerializer MultipartUpload, _$MultipartUpload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -130,6 +136,7 @@ class MultipartUploadRestXmlSerializer shape: 'restXml', ) ]; + @override MultipartUpload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.dart index a058edfcfe..f1aa874c0a 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.dart @@ -34,7 +34,6 @@ abstract class NoSuchBucket _i1.AWSBaseHttpResponse response, ) => payload.rebuild((b) { - b.statusCode = response.statusCode; b.headers = response.headers; }); @@ -47,20 +46,26 @@ abstract class NoSuchBucket namespace: 'com.amazonaws.s3', shape: 'NoSuchBucket', ); + @override String? get message => null; + @override _i2.RetryConfig? 
get retryConfig => null; + @override @BuiltValueField(compare: false) - int? get statusCode; + int get statusCode => 404; + @override @BuiltValueField(compare: false) Map? get headers; @override Exception? get underlyingException => null; + @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('NoSuchBucket'); @@ -77,6 +82,7 @@ class NoSuchBucketRestXmlSerializer NoSuchBucket, _$NoSuchBucket, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -84,6 +90,7 @@ class NoSuchBucketRestXmlSerializer shape: 'restXml', ) ]; + @override NoSuchBucket deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.g.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.g.dart index 415c6b74b6..b076ff6056 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.g.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_bucket.g.dart @@ -7,15 +7,13 @@ part of 'no_such_bucket.dart'; // ************************************************************************** class _$NoSuchBucket extends NoSuchBucket { - @override - final int? statusCode; @override final Map? headers; factory _$NoSuchBucket([void Function(NoSuchBucketBuilder)? updates]) => (new NoSuchBucketBuilder()..update(updates))._build(); - _$NoSuchBucket._({this.statusCode, this.headers}) : super._(); + _$NoSuchBucket._({this.headers}) : super._(); @override NoSuchBucket rebuild(void Function(NoSuchBucketBuilder) updates) => @@ -40,10 +38,6 @@ class NoSuchBucketBuilder implements Builder { _$NoSuchBucket? _$v; - int? _statusCode; - int? get statusCode => _$this._statusCode; - set statusCode(int? statusCode) => _$this._statusCode = statusCode; - Map? _headers; Map? get headers => _$this._headers; set headers(Map? headers) => _$this._headers = headers; @@ -53,7 +47,6 @@ class NoSuchBucketBuilder NoSuchBucketBuilder get _$this { final $v = _$v; if ($v != null) { - _statusCode = $v.statusCode; _headers = $v.headers; _$v = null; } @@ -75,8 +68,7 @@ class NoSuchBucketBuilder NoSuchBucket build() => _build(); _$NoSuchBucket _build() { - final _$result = - _$v ?? new _$NoSuchBucket._(statusCode: statusCode, headers: headers); + final _$result = _$v ?? new _$NoSuchBucket._(headers: headers); replace(_$result); return _$result; } diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.dart index 73f1a62893..30196073c5 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.dart @@ -34,7 +34,6 @@ abstract class NoSuchKey _i1.AWSBaseHttpResponse response, ) => payload.rebuild((b) { - b.statusCode = response.statusCode; b.headers = response.headers; }); @@ -47,20 +46,26 @@ abstract class NoSuchKey namespace: 'com.amazonaws.s3', shape: 'NoSuchKey', ); + @override String? get message => null; + @override _i2.RetryConfig? get retryConfig => null; + @override @BuiltValueField(compare: false) - int? get statusCode; + int get statusCode => 404; + @override @BuiltValueField(compare: false) Map? get headers; @override Exception? 
get underlyingException => null; + @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('NoSuchKey'); @@ -77,6 +82,7 @@ class NoSuchKeyRestXmlSerializer NoSuchKey, _$NoSuchKey, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -84,6 +90,7 @@ class NoSuchKeyRestXmlSerializer shape: 'restXml', ) ]; + @override NoSuchKey deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.g.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.g.dart index ba87ca723c..e87c01a20f 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.g.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_key.g.dart @@ -7,15 +7,13 @@ part of 'no_such_key.dart'; // ************************************************************************** class _$NoSuchKey extends NoSuchKey { - @override - final int? statusCode; @override final Map? headers; factory _$NoSuchKey([void Function(NoSuchKeyBuilder)? updates]) => (new NoSuchKeyBuilder()..update(updates))._build(); - _$NoSuchKey._({this.statusCode, this.headers}) : super._(); + _$NoSuchKey._({this.headers}) : super._(); @override NoSuchKey rebuild(void Function(NoSuchKeyBuilder) updates) => @@ -39,10 +37,6 @@ class _$NoSuchKey extends NoSuchKey { class NoSuchKeyBuilder implements Builder { _$NoSuchKey? _$v; - int? _statusCode; - int? get statusCode => _$this._statusCode; - set statusCode(int? statusCode) => _$this._statusCode = statusCode; - Map? _headers; Map? get headers => _$this._headers; set headers(Map? headers) => _$this._headers = headers; @@ -52,7 +46,6 @@ class NoSuchKeyBuilder implements Builder { NoSuchKeyBuilder get _$this { final $v = _$v; if ($v != null) { - _statusCode = $v.statusCode; _headers = $v.headers; _$v = null; } @@ -74,8 +67,7 @@ class NoSuchKeyBuilder implements Builder { NoSuchKey build() => _build(); _$NoSuchKey _build() { - final _$result = - _$v ?? new _$NoSuchKey._(statusCode: statusCode, headers: headers); + final _$result = _$v ?? new _$NoSuchKey._(headers: headers); replace(_$result); return _$result; } diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.dart index 7bb2c9e8e7..cee237a5f9 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.dart @@ -34,7 +34,6 @@ abstract class NoSuchUpload _i1.AWSBaseHttpResponse response, ) => payload.rebuild((b) { - b.statusCode = response.statusCode; b.headers = response.headers; }); @@ -47,20 +46,26 @@ abstract class NoSuchUpload namespace: 'com.amazonaws.s3', shape: 'NoSuchUpload', ); + @override String? get message => null; + @override _i2.RetryConfig? get retryConfig => null; + @override @BuiltValueField(compare: false) - int? get statusCode; + int get statusCode => 404; + @override @BuiltValueField(compare: false) Map? get headers; @override Exception? 
get underlyingException => null; + @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('NoSuchUpload'); @@ -77,6 +82,7 @@ class NoSuchUploadRestXmlSerializer NoSuchUpload, _$NoSuchUpload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -84,6 +90,7 @@ class NoSuchUploadRestXmlSerializer shape: 'restXml', ) ]; + @override NoSuchUpload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.g.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.g.dart index f11b794c93..95e642be48 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.g.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/no_such_upload.g.dart @@ -7,15 +7,13 @@ part of 'no_such_upload.dart'; // ************************************************************************** class _$NoSuchUpload extends NoSuchUpload { - @override - final int? statusCode; @override final Map? headers; factory _$NoSuchUpload([void Function(NoSuchUploadBuilder)? updates]) => (new NoSuchUploadBuilder()..update(updates))._build(); - _$NoSuchUpload._({this.statusCode, this.headers}) : super._(); + _$NoSuchUpload._({this.headers}) : super._(); @override NoSuchUpload rebuild(void Function(NoSuchUploadBuilder) updates) => @@ -40,10 +38,6 @@ class NoSuchUploadBuilder implements Builder { _$NoSuchUpload? _$v; - int? _statusCode; - int? get statusCode => _$this._statusCode; - set statusCode(int? statusCode) => _$this._statusCode = statusCode; - Map? _headers; Map? get headers => _$this._headers; set headers(Map? headers) => _$this._headers = headers; @@ -53,7 +47,6 @@ class NoSuchUploadBuilder NoSuchUploadBuilder get _$this { final $v = _$v; if ($v != null) { - _statusCode = $v.statusCode; _headers = $v.headers; _$v = null; } @@ -75,8 +68,7 @@ class NoSuchUploadBuilder NoSuchUpload build() => _build(); _$NoSuchUpload _build() { - final _$result = - _$v ?? new _$NoSuchUpload._(statusCode: statusCode, headers: headers); + final _$result = _$v ?? new _$NoSuchUpload._(headers: headers); replace(_$result); return _$result; } diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/not_found.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/not_found.dart index ef3b9f3ee3..6e77142033 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/not_found.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/not_found.dart @@ -46,10 +46,13 @@ abstract class NotFound namespace: 'com.amazonaws.s3', shape: 'NotFound', ); + @override String? get message => null; + @override _i2.RetryConfig? get retryConfig => null; + @override @BuiltValueField(compare: false) int? get statusCode; @@ -58,8 +61,10 @@ abstract class NotFound Map? get headers; @override Exception? 
get underlyingException => null; + @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('NotFound'); @@ -76,6 +81,7 @@ class NotFoundRestXmlSerializer NotFound, _$NotFound, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -83,6 +89,7 @@ class NotFoundRestXmlSerializer shape: 'restXml', ) ]; + @override NotFound deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object.dart index 7f54bf73a8..fee0d4fb82 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object.dart @@ -66,6 +66,9 @@ abstract class S3Object /// * Objects created by the PUT Object, POST Object, or Copy operation, or through the Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data. /// /// * If an object is created by either the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption. If an object is larger than 16 MB, the Amazon Web Services Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest. + /// + /// + /// **Directory buckets** \- MD5 is not supported by directory buckets. String? get eTag; /// The algorithm that was used to create a checksum of the object. @@ -75,12 +78,18 @@ abstract class S3Object _i2.Int64? get size; /// The class of storage used to store the object. + /// + /// **Directory buckets** \- Only the S3 Express One Zone storage class is supported by directory buckets to store objects. ObjectStorageClass? get storageClass; /// The owner of the object + /// + /// **Directory buckets** \- The bucket owner is returned as the object owner. Owner? get owner; /// Specifies the restoration status of an object. Objects in certain storage classes must be restored before they can be retrieved. For more information about these storage classes and how to work with archived objects, see [Working with archived objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. RestoreStatus? 
get restoreStatus; @override List get props => [ @@ -93,6 +102,7 @@ abstract class S3Object owner, restoreStatus, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('S3Object') @@ -140,6 +150,7 @@ class ObjectRestXmlSerializer extends _i4.StructuredSmithySerializer { S3Object, _$S3Object, ]; + @override Iterable<_i4.ShapeId> get supportedProtocols => const [ _i4.ShapeId( @@ -147,6 +158,7 @@ class ObjectRestXmlSerializer extends _i4.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override S3Object deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_identifier.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_identifier.dart index 785b8cb5c1..43249d1fbf 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_identifier.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_identifier.dart @@ -40,13 +40,16 @@ abstract class ObjectIdentifier /// Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see [XML related object key constraints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). String get key; - /// VersionId for the specific version of the object to delete. + /// Version ID for the specific version of the object to delete. + /// + /// This functionality is not supported for directory buckets. String? get versionId; @override List get props => [ key, versionId, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ObjectIdentifier') @@ -71,6 +74,7 @@ class ObjectIdentifierRestXmlSerializer ObjectIdentifier, _$ObjectIdentifier, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -78,6 +82,7 @@ class ObjectIdentifierRestXmlSerializer shape: 'restXml', ) ]; + @override ObjectIdentifier deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.dart index 1a9cd645b0..359125a5df 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.dart @@ -35,7 +35,6 @@ abstract class ObjectNotInActiveTierError _i1.AWSBaseHttpResponse response, ) => payload.rebuild((b) { - b.statusCode = response.statusCode; b.headers = response.headers; }); @@ -47,20 +46,26 @@ abstract class ObjectNotInActiveTierError namespace: 'com.amazonaws.s3', shape: 'ObjectNotInActiveTierError', ); + @override String? get message => null; + @override _i2.RetryConfig? get retryConfig => null; + @override @BuiltValueField(compare: false) - int? get statusCode; + int get statusCode => 403; + @override @BuiltValueField(compare: false) Map? get headers; @override Exception? 
get underlyingException => null; + @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('ObjectNotInActiveTierError'); @@ -78,6 +83,7 @@ class ObjectNotInActiveTierErrorRestXmlSerializer ObjectNotInActiveTierError, _$ObjectNotInActiveTierError, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -85,6 +91,7 @@ class ObjectNotInActiveTierErrorRestXmlSerializer shape: 'restXml', ) ]; + @override ObjectNotInActiveTierError deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.g.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.g.dart index fdb3992005..b4f1b0f674 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.g.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_not_in_active_tier_error.g.dart @@ -7,8 +7,6 @@ part of 'object_not_in_active_tier_error.dart'; // ************************************************************************** class _$ObjectNotInActiveTierError extends ObjectNotInActiveTierError { - @override - final int? statusCode; @override final Map? headers; @@ -16,7 +14,7 @@ class _$ObjectNotInActiveTierError extends ObjectNotInActiveTierError { [void Function(ObjectNotInActiveTierErrorBuilder)? updates]) => (new ObjectNotInActiveTierErrorBuilder()..update(updates))._build(); - _$ObjectNotInActiveTierError._({this.statusCode, this.headers}) : super._(); + _$ObjectNotInActiveTierError._({this.headers}) : super._(); @override ObjectNotInActiveTierError rebuild( @@ -44,10 +42,6 @@ class ObjectNotInActiveTierErrorBuilder Builder { _$ObjectNotInActiveTierError? _$v; - int? _statusCode; - int? get statusCode => _$this._statusCode; - set statusCode(int? statusCode) => _$this._statusCode = statusCode; - Map? _headers; Map? get headers => _$this._headers; set headers(Map? headers) => _$this._headers = headers; @@ -57,7 +51,6 @@ class ObjectNotInActiveTierErrorBuilder ObjectNotInActiveTierErrorBuilder get _$this { final $v = _$v; if ($v != null) { - _statusCode = $v.statusCode; _headers = $v.headers; _$v = null; } @@ -79,9 +72,8 @@ class ObjectNotInActiveTierErrorBuilder ObjectNotInActiveTierError build() => _build(); _$ObjectNotInActiveTierError _build() { - final _$result = _$v ?? - new _$ObjectNotInActiveTierError._( - statusCode: statusCode, headers: headers); + final _$result = + _$v ?? 
new _$ObjectNotInActiveTierError._(headers: headers); replace(_$result); return _$result; } diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_storage_class.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_storage_class.dart index 946c68b437..9b1d905078 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_storage_class.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/object_storage_class.dart @@ -20,56 +20,62 @@ class ObjectStorageClass extends _i1.SmithyEnum { 'DEEP_ARCHIVE', ); - static const glacier = ObjectStorageClass._( + static const expressOnezone = ObjectStorageClass._( 1, + 'EXPRESS_ONEZONE', + 'EXPRESS_ONEZONE', + ); + + static const glacier = ObjectStorageClass._( + 2, 'GLACIER', 'GLACIER', ); static const glacierIr = ObjectStorageClass._( - 2, + 3, 'GLACIER_IR', 'GLACIER_IR', ); static const intelligentTiering = ObjectStorageClass._( - 3, + 4, 'INTELLIGENT_TIERING', 'INTELLIGENT_TIERING', ); static const onezoneIa = ObjectStorageClass._( - 4, + 5, 'ONEZONE_IA', 'ONEZONE_IA', ); static const outposts = ObjectStorageClass._( - 5, + 6, 'OUTPOSTS', 'OUTPOSTS', ); static const reducedRedundancy = ObjectStorageClass._( - 6, + 7, 'REDUCED_REDUNDANCY', 'REDUCED_REDUNDANCY', ); static const snow = ObjectStorageClass._( - 7, + 8, 'SNOW', 'SNOW', ); static const standard = ObjectStorageClass._( - 8, + 9, 'STANDARD', 'STANDARD', ); static const standardIa = ObjectStorageClass._( - 9, + 10, 'STANDARD_IA', 'STANDARD_IA', ); @@ -77,6 +83,7 @@ class ObjectStorageClass extends _i1.SmithyEnum { /// All values of [ObjectStorageClass]. static const values = [ ObjectStorageClass.deepArchive, + ObjectStorageClass.expressOnezone, ObjectStorageClass.glacier, ObjectStorageClass.glacierIr, ObjectStorageClass.intelligentTiering, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/output_serialization.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/output_serialization.dart index 8df71b3535..74a81e134d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/output_serialization.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/output_serialization.dart @@ -48,6 +48,7 @@ abstract class OutputSerialization csv, json, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('OutputSerialization') @@ -72,6 +73,7 @@ class OutputSerializationRestXmlSerializer OutputSerialization, _$OutputSerialization, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -79,6 +81,7 @@ class OutputSerializationRestXmlSerializer shape: 'restXml', ) ]; + @override OutputSerialization deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/owner.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/owner.dart index 4995ca2732..e0a8a24cca 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/owner.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/owner.dart @@ -51,6 +51,9 @@ abstract class Owner /// * Europe (Ireland) /// /// * South America (São Paulo) + /// + /// + /// This functionality is not supported for directory buckets. String? get displayName; /// Container for the ID of the owner. 
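The `ObjectStorageClass` hunk above adds the `EXPRESS_ONEZONE` member and re-numbers the existing constants. Below is a hypothetical consumer-side sketch, not part of the regenerated code, that uses the new member to pick out S3 Express One Zone (directory-bucket) objects from a `ListObjectsV2Output` page; the `key` getter on `S3Object` and the barrel import are assumptions rather than something shown in this diff.

```dart
// Hypothetical filter (a sketch, not part of the generated code). Assumes the
// package's barrel import and the standard `key` getter on S3Object.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

/// Keys on this page whose objects are stored in the S3 Express One Zone
/// storage class, i.e. objects that live in a directory bucket.
Iterable<String> expressOneZoneKeys(ListObjectsV2Output page) {
  final contents = page.contents;
  if (contents == null) return const <String>[];
  return contents
      .where((object) =>
          object.storageClass == ObjectStorageClass.expressOnezone)
      .map((object) => object.key ?? '');
}
```

Because the enum indices shift in this regeneration, callers should match on the named constants (or on `value`) rather than on the numeric index.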
@@ -60,6 +63,7 @@ abstract class Owner displayName, id, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Owner') @@ -83,6 +87,7 @@ class OwnerRestXmlSerializer extends _i2.StructuredSmithySerializer { Owner, _$Owner, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -90,6 +95,7 @@ class OwnerRestXmlSerializer extends _i2.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override Owner deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/parquet_input.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/parquet_input.dart index 94d4e04a6b..ac3b808193 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/parquet_input.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/parquet_input.dart @@ -31,6 +31,7 @@ abstract class ParquetInput @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('ParquetInput'); @@ -47,6 +48,7 @@ class ParquetInputRestXmlSerializer ParquetInput, _$ParquetInput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -54,6 +56,7 @@ class ParquetInputRestXmlSerializer shape: 'restXml', ) ]; + @override ParquetInput deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/part.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/part.dart index 010d0f1fb4..1b28b59fc1 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/part.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/part.dart @@ -62,10 +62,10 @@ abstract class Part /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. 
For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. @@ -81,6 +81,7 @@ abstract class Part checksumSha1, checksumSha256, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Part') @@ -128,6 +129,7 @@ class PartRestXmlSerializer extends _i3.StructuredSmithySerializer { Part, _$Part, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -135,6 +137,7 @@ class PartRestXmlSerializer extends _i3.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override Part deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress.dart index 9056d6b547..36ef23ae50 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress.dart @@ -51,6 +51,7 @@ abstract class Progress bytesProcessed, bytesReturned, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Progress') @@ -79,6 +80,7 @@ class ProgressRestXmlSerializer Progress, _$Progress, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -86,6 +88,7 @@ class ProgressRestXmlSerializer shape: 'restXml', ) ]; + @override Progress deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress_event.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress_event.dart index 1e5513eb4d..82ffc7601a 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress_event.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/progress_event.dart @@ -34,6 +34,7 @@ abstract class ProgressEvent Progress? 
get details; @override List get props => [details]; + @override String toString() { final helper = newBuiltValueToStringHelper('ProgressEvent') @@ -54,6 +55,7 @@ class ProgressEventRestXmlSerializer ProgressEvent, _$ProgressEvent, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -61,6 +63,7 @@ class ProgressEventRestXmlSerializer shape: 'restXml', ) ]; + @override ProgressEvent deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_output.dart index f73368c347..cbcf793b5a 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_output.dart @@ -125,49 +125,74 @@ abstract class PutObjectOutput static const List<_i2.SmithySerializer> serializers = [PutObjectOutputRestXmlSerializer()]; - /// If the expiration is configured for the object (see [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs that provide information about object expiration. The value of the `rule-id` is URL-encoded. + /// If the expiration is configured for the object (see [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)) in the _Amazon S3 User Guide_, the response includes this header. It includes the `expiry-date` and `rule-id` key-value pairs that provide information about object expiration. The value of the `rule-id` is URL-encoded. + /// + /// This functionality is not supported for directory buckets. String? get expiration; /// Entity tag for the uploaded object. + /// + /// **General purpose buckets** \- To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. + /// + /// **Directory buckets** \- The ETag for the object in a directory bucket isn't the MD5 digest of the object. String? get eTag; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). 
+ /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// Version of the object. + /// Version ID of the object. + /// + /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) in the _Amazon S3 User Guide_. For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + /// + /// This functionality is not supported for directory buckets. String? get versionId; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If `x-amz-server-side-encryption` has a valid value of `aws:kms` or `aws:kms:dsse`, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If `x-amz-server-side-encryption` has a valid value of `aws:kms` or `aws:kms:dsse`, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; - /// If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future `GetObject` or `CopyObject` operations on this object. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future `GetObject` or `CopyObject` operations on this object. + /// + /// This functionality is not supported for directory buckets. String? 
get ssekmsEncryptionContext; /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override PutObjectOutputPayload getPayload() => PutObjectOutputPayload(); + @override List get props => [ expiration, @@ -185,6 +210,7 @@ abstract class PutObjectOutput bucketKeyEnabled, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('PutObjectOutput') @@ -262,6 +288,7 @@ abstract class PutObjectOutputPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('PutObjectOutputPayload'); @@ -280,6 +307,7 @@ class PutObjectOutputRestXmlSerializer PutObjectOutputPayload, _$PutObjectOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -287,6 +315,7 @@ class PutObjectOutputRestXmlSerializer shape: 'restXml', ) ]; + @override PutObjectOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_request.dart index ff219f1ca8..cfdc3fa98f 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/put_object_request.dart @@ -275,9 +275,15 @@ abstract class PutObjectRequest b.body = const _i2.Stream.empty(); } - /// The canned ACL to apply to the object. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + /// The canned ACL to apply to the object. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) in the _Amazon S3 User Guide_. /// - /// This action is not supported by Amazon S3 on Outposts. + /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) in the _Amazon S3 User Guide_. + /// + /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a `400` error with the error code `AccessControlListNotSupported`. 
For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. + /// + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. ObjectCannedAcl? get acl; /// Object data. @@ -285,9 +291,13 @@ abstract class PutObjectRequest /// The bucket name to which the PUT action was initiated. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. + /// + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. 
For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Can be used to specify caching behavior along the request/reply chain. For more information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). @@ -306,14 +316,33 @@ abstract class PutObjectRequest _i4.Int64? get contentLength; /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + /// + /// The `Content-MD5` header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get contentMd5; /// A standard MIME type describing the format of the contents. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type](https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). String? get contentType; - /// Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any additional functionality if not using the SDK. When sending this header, there must be a corresponding `x-amz-checksum` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding `x-amz-checksum-_algorithm_` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. + /// + /// For the `x-amz-checksum-_algorithm_` header, replace `_algorithm_` with the supported algorithm from the following list: + /// + /// * CRC32 + /// + /// * CRC32C /// - /// If you provide an individual checksum, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter. + /// * SHA1 + /// + /// * SHA256 + /// + /// + /// For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// + /// If the individual checksum value you provide through `x-amz-checksum-_algorithm_` doesn't match the checksum algorithm you set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter and uses the checksum algorithm that matches the provided value in `x-amz-checksum-_algorithm_` . 
+ /// + /// For directory buckets, when you use Amazon Web Services SDKs, `CRC32` is the default checksum algorithm that's used for performance. ChecksumAlgorithm? get checksumAlgorithm; /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. @@ -333,22 +362,30 @@ abstract class PutObjectRequest /// Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the object. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantFullControl; /// Allows grantee to read the object data and its metadata. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantRead; /// Allows grantee to read the object ACL. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantReadAcp; /// Allows grantee to write the ACL for the applicable object. /// - /// This action is not supported by Amazon S3 on Outposts. + /// * This functionality is not supported for directory buckets. + /// + /// * This functionality is not supported for Amazon S3 on Outposts. String? get grantWriteAcp; /// Object key for which the PUT action was initiated. @@ -357,13 +394,21 @@ abstract class PutObjectRequest /// A map of metadata to store with the object in S3. _i5.BuiltMap? get metadata; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). + /// + /// **General purpose buckets** \- You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) value is supported. ServerSideEncryption? get serverSideEncryption; - /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. 
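Tying the `PutObjectRequest` changes in this hunk together (directory-bucket naming, checksum algorithm selection, and the new `EXPRESS_ONEZONE` storage class), here is a hedged sketch of building a request. The `PutObjectRequest.build` factory, the `ChecksumAlgorithm.crc32` member name, and the `checksum_algorithm.dart` import path are assumed from the generator's built_value conventions rather than shown in this patch; bucket and key values are placeholders, with the bucket name following the naming format quoted above:

```dart
import 'dart:convert';

import 'package:amplify_storage_s3_dart/src/sdk/src/s3/model/checksum_algorithm.dart'; // assumed path
import 'package:amplify_storage_s3_dart/src/sdk/src/s3/model/put_object_request.dart';
import 'package:amplify_storage_s3_dart/src/sdk/src/s3/model/storage_class.dart';

// Illustrative request: a small text object destined for a directory bucket,
// with a CRC32 checksum requested via the x-amz-sdk-checksum-algorithm header.
final putRequest = PutObjectRequest.build(
  (b) => b
    ..bucket = 'DOC-EXAMPLE-BUCKET--usw2-az2--x-s3' // placeholder bucket
    ..key = 'notes/readme.txt' // placeholder key
    ..body = Stream.value(utf8.encode('hello, directory buckets'))
    ..checksumAlgorithm = ChecksumAlgorithm.crc32 // member name assumed
    ..storageClass = StorageClass.expressOnezone,
);
```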
For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// + /// * For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. + /// + /// * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass? get storageClass; - /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see [Object Key and Metadata](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + /// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see [Object Key and Metadata](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) in the _Amazon S3 User Guide_. /// /// In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket: /// @@ -373,45 +418,69 @@ abstract class PutObjectRequest /// /// `x-amz-website-redirect-location: http://www.example.com/` /// - /// For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and [How to Configure Website Page Redirects](https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + /// For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and [How to Configure Website Page Redirects](https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. String? get websiteRedirectLocation; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, `AES256`). + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported for directory buckets. String? 
get sseCustomerKeyMd5; - /// If `x-amz-server-side-encryption` has a valid value of `aws:kms` or `aws:kms:dsse`, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify `x-amz-server-side-encryption:aws:kms` or `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3`) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID. + /// If `x-amz-server-side-encryption` has a valid value of `aws:kms` or `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify `x-amz-server-side-encryption:aws:kms` or `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3`) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future `GetObject` or `CopyObject` operations on this object. + /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future `GetObject` or `CopyObject` operations on this object. This value must be explicitly added during `CopyObject` operations. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsEncryptionContext; /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. /// /// Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key. + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. 
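For the SSE-C fields described above, the customer-provided key travels base64-encoded together with the base64 of its MD5 digest. A hedged sketch of preparing those values; `package:crypto` is an assumed dependency and the key bytes are placeholders:

```dart
import 'dart:convert';
import 'dart:typed_data';

import 'package:crypto/crypto.dart' as crypto; // assumed dependency

// Placeholder 256-bit customer-provided key; never hard-code real keys.
final Uint8List customerKey = Uint8List(32);

// Values shaped the way the SSE-C request fields above expect them:
// the key is sent base64-encoded, alongside the base64 of its MD5 digest.
final sseCustomerAlgorithm = 'AES256';
final sseCustomerKey = base64Encode(customerKey);
final sseCustomerKeyMd5 = base64Encode(crypto.md5.convert(customerKey).bytes);
```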
For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; /// The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1") + /// + /// This functionality is not supported for directory buckets. String? get tagging; /// The Object Lock mode that you want to apply to this object. + /// + /// This functionality is not supported for directory buckets. ObjectLockMode? get objectLockMode; /// The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter. + /// + /// This functionality is not supported for directory buckets. DateTime? get objectLockRetainUntilDate; - /// Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see [Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + /// Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see [Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus? get objectLockLegalHoldStatus; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; @override String labelFor(String key) { @@ -429,6 +498,7 @@ abstract class PutObjectRequest @override _i2.Stream> getPayload() => body; + @override List get props => [ acl, @@ -469,6 +539,7 @@ abstract class PutObjectRequest objectLockLegalHoldStatus, expectedBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('PutObjectRequest') @@ -633,6 +704,7 @@ class PutObjectRequestRestXmlSerializer PutObjectRequest, _$PutObjectRequest, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -640,6 +712,7 @@ class PutObjectRequestRestXmlSerializer shape: 'restXml', ) ]; + @override _i2.Stream> deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/records_event.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/records_event.dart index 7f94cedf73..72428f52b7 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/records_event.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/records_event.dart @@ -35,6 +35,7 @@ abstract class RecordsEvent _i2.Uint8List? 
get payload; @override List get props => [payload]; + @override String toString() { final helper = newBuiltValueToStringHelper('RecordsEvent') @@ -55,6 +56,7 @@ class RecordsEventRestXmlSerializer RecordsEvent, _$RecordsEvent, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -62,6 +64,7 @@ class RecordsEventRestXmlSerializer shape: 'restXml', ) ]; + @override RecordsEvent deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/replication_status.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/replication_status.dart index 85f3ab2855..5a134e8e38 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/replication_status.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/replication_status.dart @@ -20,20 +20,26 @@ class ReplicationStatus extends _i1.SmithyEnum { 'COMPLETE', ); - static const failed = ReplicationStatus._( + static const completed = ReplicationStatus._( 1, + 'COMPLETED', + 'COMPLETED', + ); + + static const failed = ReplicationStatus._( + 2, 'FAILED', 'FAILED', ); static const pending = ReplicationStatus._( - 2, + 3, 'PENDING', 'PENDING', ); static const replica = ReplicationStatus._( - 3, + 4, 'REPLICA', 'REPLICA', ); @@ -41,6 +47,7 @@ class ReplicationStatus extends _i1.SmithyEnum { /// All values of [ReplicationStatus]. static const values = [ ReplicationStatus.complete, + ReplicationStatus.completed, ReplicationStatus.failed, ReplicationStatus.pending, ReplicationStatus.replica, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_charged.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_charged.dart index 798a6c595e..45b3a7c3f0 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_charged.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_charged.dart @@ -6,6 +6,8 @@ library amplify_storage_s3_dart.s3.model.request_charged; // ignore_for_file: no import 'package:smithy/smithy.dart' as _i1; /// If present, indicates that the requester was successfully charged for the request. +/// +/// This functionality is not supported for directory buckets. class RequestCharged extends _i1.SmithyEnum { const RequestCharged._( super.index, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_payer.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_payer.dart index 56a60e4d57..891f201ec6 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_payer.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_payer.dart @@ -5,7 +5,9 @@ library amplify_storage_s3_dart.s3.model.request_payer; // ignore_for_file: no_l import 'package:smithy/smithy.dart' as _i1; -/// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. +/// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. 
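For the `ReplicationStatus` change earlier in this hunk (the wire value `COMPLETED` now exists alongside the pre-existing `COMPLETE`), a small illustrative helper that treats either as terminal; the function name is hypothetical and the import path mirrors the file touched here:

```dart
import 'package:amplify_storage_s3_dart/src/sdk/src/s3/model/replication_status.dart';

// Illustrative helper: the service may report either spelling, so treat
// both as "replication finished".
bool replicationDone(ReplicationStatus? status) =>
    status == ReplicationStatus.complete ||
    status == ReplicationStatus.completed;
```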
If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. +/// +/// This functionality is not supported for directory buckets. class RequestPayer extends _i1.SmithyEnum { const RequestPayer._( super.index, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_progress.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_progress.dart index 2471cdb385..2ac09e1c7d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_progress.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/request_progress.dart @@ -33,6 +33,7 @@ abstract class RequestProgress bool? get enabled; @override List get props => [enabled]; + @override String toString() { final helper = newBuiltValueToStringHelper('RequestProgress') @@ -53,6 +54,7 @@ class RequestProgressRestXmlSerializer RequestProgress, _$RequestProgress, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -60,6 +62,7 @@ class RequestProgressRestXmlSerializer shape: 'restXml', ) ]; + @override RequestProgress deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/restore_status.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/restore_status.dart index 5f7de6f4e3..4f2f1ee97d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/restore_status.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/restore_status.dart @@ -11,10 +11,14 @@ import 'package:smithy/smithy.dart' as _i2; part 'restore_status.g.dart'; /// Specifies the restoration status of an object. Objects in certain storage classes must be restored before they can be retrieved. For more information about these storage classes and how to work with archived objects, see [Working with archived objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) in the _Amazon S3 User Guide_. +/// +/// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. abstract class RestoreStatus with _i1.AWSEquatable implements Built { /// Specifies the restoration status of an object. Objects in certain storage classes must be restored before they can be retrieved. For more information about these storage classes and how to work with archived objects, see [Working with archived objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. factory RestoreStatus({ bool? isRestoreInProgress, DateTime? restoreExpiryDate, @@ -26,6 +30,8 @@ abstract class RestoreStatus } /// Specifies the restoration status of an object. Objects in certain storage classes must be restored before they can be retrieved. 
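A minimal sketch of consuming `RestoreStatus` as documented here; the getters come from the model in this hunk and the helper name is illustrative:

```dart
import 'package:amplify_storage_s3_dart/src/sdk/src/s3/model/restore_status.dart';

// Illustrative helper: summarize the restore state carried on a listed object.
String describeRestore(RestoreStatus status) {
  if (status.isRestoreInProgress ?? false) {
    return 'restore in progress';
  }
  final expiry = status.restoreExpiryDate;
  return expiry == null
      ? 'not restored'
      : 'restored copy available until $expiry';
}
```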
For more information about these storage classes and how to work with archived objects, see [Working with archived objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. factory RestoreStatus.build([void Function(RestoreStatusBuilder) updates]) = _$RestoreStatus; @@ -55,6 +61,7 @@ abstract class RestoreStatus isRestoreInProgress, restoreExpiryDate, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('RestoreStatus') @@ -79,6 +86,7 @@ class RestoreStatusRestXmlSerializer RestoreStatus, _$RestoreStatus, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -86,6 +94,7 @@ class RestoreStatusRestXmlSerializer shape: 'restXml', ) ]; + @override RestoreStatus deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/scan_range.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/scan_range.dart index 7ac62276b8..5e552b0210 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/scan_range.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/scan_range.dart @@ -46,6 +46,7 @@ abstract class ScanRange start, end, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('ScanRange') @@ -70,6 +71,7 @@ class ScanRangeRestXmlSerializer ScanRange, _$ScanRange, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -77,6 +79,7 @@ class ScanRangeRestXmlSerializer shape: 'restXml', ) ]; + @override ScanRange deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_event_stream.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_event_stream.dart index e8e23402a1..c04e84f325 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_event_stream.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_event_stream.dart @@ -58,8 +58,10 @@ sealed class SelectObjectContentEventStream /// The End Event. EndEvent? get end => null; + @override Object get value => (records ?? stats ?? progress ?? cont ?? end)!; + @override String toString() { final helper = @@ -181,6 +183,7 @@ class SelectObjectContentEventStreamRestXmlSerializer SelectObjectContentEventStreamCont$, SelectObjectContentEventStreamEnd$, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -188,6 +191,7 @@ class SelectObjectContentEventStreamRestXmlSerializer shape: 'restXml', ) ]; + @override SelectObjectContentEventStream deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_output.dart index ff9e13fd6e..a4a043571e 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_output.dart @@ -41,8 +41,10 @@ abstract class SelectObjectContentOutput SelectObjectContentEventStream? 
get payload; @override SelectObjectContentEventStream? getPayload() => payload; + @override List get props => [payload]; + @override String toString() { final helper = newBuiltValueToStringHelper('SelectObjectContentOutput') diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_request.dart index 264ea3ac50..097537aa9e 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/select_object_content_request.dart @@ -150,7 +150,7 @@ abstract class SelectObjectContentRequest /// * `50` \- process only the records within the last 50 bytes of the file. ScanRange? get scanRange; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; @override String labelFor(String key) { @@ -180,6 +180,7 @@ abstract class SelectObjectContentRequest b.scanRange.replace(scanRange!); } }); + @override List get props => [ bucket, @@ -195,6 +196,7 @@ abstract class SelectObjectContentRequest scanRange, expectedBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('SelectObjectContentRequest') @@ -297,6 +299,7 @@ abstract class SelectObjectContentRequestPayload requestProgress, scanRange, ]; + @override String toString() { final helper = @@ -341,6 +344,7 @@ class SelectObjectContentRequestRestXmlSerializer SelectObjectContentRequestPayload, _$SelectObjectContentRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -348,6 +352,7 @@ class SelectObjectContentRequestRestXmlSerializer shape: 'restXml', ) ]; + @override SelectObjectContentRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats.dart index e7ff0589c1..6c1ae03172 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats.dart @@ -51,6 +51,7 @@ abstract class Stats bytesProcessed, bytesReturned, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('Stats') @@ -78,6 +79,7 @@ class StatsRestXmlSerializer extends _i3.StructuredSmithySerializer { Stats, _$Stats, ]; + @override Iterable<_i3.ShapeId> get supportedProtocols => const [ _i3.ShapeId( @@ -85,6 +87,7 @@ class StatsRestXmlSerializer extends _i3.StructuredSmithySerializer { shape: 'restXml', ) ]; + @override Stats deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats_event.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats_event.dart index e663ea568c..088e84b2f8 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats_event.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/stats_event.dart @@ -34,6 +34,7 @@ abstract class StatsEvent Stats? 
get details; @override List get props => [details]; + @override String toString() { final helper = newBuiltValueToStringHelper('StatsEvent') @@ -54,6 +55,7 @@ class StatsEventRestXmlSerializer StatsEvent, _$StatsEvent, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -61,6 +63,7 @@ class StatsEventRestXmlSerializer shape: 'restXml', ) ]; + @override StatsEvent deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/storage_class.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/storage_class.dart index 1e34b0040c..8fe39ea5a9 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/storage_class.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/storage_class.dart @@ -20,56 +20,62 @@ class StorageClass extends _i1.SmithyEnum { 'DEEP_ARCHIVE', ); - static const glacier = StorageClass._( + static const expressOnezone = StorageClass._( 1, + 'EXPRESS_ONEZONE', + 'EXPRESS_ONEZONE', + ); + + static const glacier = StorageClass._( + 2, 'GLACIER', 'GLACIER', ); static const glacierIr = StorageClass._( - 2, + 3, 'GLACIER_IR', 'GLACIER_IR', ); static const intelligentTiering = StorageClass._( - 3, + 4, 'INTELLIGENT_TIERING', 'INTELLIGENT_TIERING', ); static const onezoneIa = StorageClass._( - 4, + 5, 'ONEZONE_IA', 'ONEZONE_IA', ); static const outposts = StorageClass._( - 5, + 6, 'OUTPOSTS', 'OUTPOSTS', ); static const reducedRedundancy = StorageClass._( - 6, + 7, 'REDUCED_REDUNDANCY', 'REDUCED_REDUNDANCY', ); static const snow = StorageClass._( - 7, + 8, 'SNOW', 'SNOW', ); static const standard = StorageClass._( - 8, + 9, 'STANDARD', 'STANDARD', ); static const standardIa = StorageClass._( - 9, + 10, 'STANDARD_IA', 'STANDARD_IA', ); @@ -77,6 +83,7 @@ class StorageClass extends _i1.SmithyEnum { /// All values of [StorageClass]. static const values = [ StorageClass.deepArchive, + StorageClass.expressOnezone, StorageClass.glacier, StorageClass.glacierIr, StorageClass.intelligentTiering, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_output.dart index 9590bd4cdc..5ee416a95a 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_output.dart @@ -97,30 +97,45 @@ abstract class UploadPartCopyOutput ]; /// The version of the source object that was copied, if you have enabled versioning on the source bucket. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceVersionId; /// Container for all response elements. CopyPartResult? get copyPartResult; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. 
+ /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override CopyPartResult? getPayload() => copyPartResult ?? CopyPartResult(); + @override List get props => [ copySourceVersionId, @@ -132,6 +147,7 @@ abstract class UploadPartCopyOutput bucketKeyEnabled, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartCopyOutput') @@ -180,6 +196,7 @@ class UploadPartCopyOutputRestXmlSerializer UploadPartCopyOutput, _$UploadPartCopyOutput, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -187,6 +204,7 @@ class UploadPartCopyOutputRestXmlSerializer shape: 'restXml', ) ]; + @override CopyPartResult deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_request.dart index d815710c0a..fc3295bc0f 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_copy_request.dart @@ -166,9 +166,13 @@ abstract class UploadPartCopyRequest /// The bucket name. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. 
+ /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an [access point](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): @@ -177,24 +181,63 @@ abstract class UploadPartCopyRequest /// /// * For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format `arn:aws:s3:::accesspoint//object/`. For example, to copy the object `reports/january.pdf` through access point `my-access-point` owned by account `123456789012` in Region `us-west-2`, use the URL encoding of `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. The value must be URL encoded. 
/// - /// Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region. + /// * Amazon S3 supports copy operations using Access points only when the source and destination buckets are in the same Amazon Web Services Region. + /// + /// * Access points are not supported by directory buckets. + /// /// /// Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format `arn:aws:s3-outposts:::outpost//object/`. For example, to copy the object `reports/january.pdf` through outpost `my-outpost` owned by account `123456789012` in Region `us-west-2`, use the URL encoding of `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. The value must be URL-encoded. /// /// - /// To copy a specific version of an object, append `?versionId=` to the value (for example, `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). If you don't specify a version ID, Amazon S3 copies the latest version of the source object. + /// If your bucket has versioning enabled, you could have multiple versions of the same object. By default, `x-amz-copy-source` identifies the current version of the source object to copy. To copy a specific version of the source object to copy, append `?versionId=` to the `x-amz-copy-source` request header (for example, `x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). + /// + /// If the current version is a delete marker and you don't specify a versionId in the `x-amz-copy-source` request header, Amazon S3 returns a `404 Not Found` error, because the object does not exist. If you specify versionId in the `x-amz-copy-source` and the versionId is a delete marker, Amazon S3 returns an HTTP `400 Bad Request` error, because you are not allowed to specify a delete marker as a version for the `x-amz-copy-source`. + /// + /// **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. String get copySource; /// Copies the object if its entity tag (ETag) matches the specified tag. + /// + /// If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request as follows: + /// + /// `x-amz-copy-source-if-match` condition evaluates to `true`, and; + /// + /// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`; + /// + /// Amazon S3 returns `200 OK` and copies the data. String? get copySourceIfMatch; /// Copies the object if it has been modified since the specified time. + /// + /// If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request as follows: + /// + /// `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; + /// + /// `x-amz-copy-source-if-modified-since` condition evaluates to `true`; + /// + /// Amazon S3 returns `412 Precondition Failed` response code. DateTime? get copySourceIfModifiedSince; /// Copies the object if its entity tag (ETag) is different than the specified ETag. 
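A hypothetical helper (not part of the generated code) that produces an `x-amz-copy-source` value in the `/bucket/key?versionId=` form described above; per the documentation, the value must be URL encoded, and omitting the version ID copies the current version of the source object:

```dart
// Hypothetical helper: formats a copy source value for UploadPartCopy.
String copySourceFor(String bucket, String key, {String? versionId}) {
  final source = '/$bucket/${Uri.encodeComponent(key)}';
  return versionId == null ? source : '$source?versionId=$versionId';
}
```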
+ /// + /// If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request as follows: + /// + /// `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; + /// + /// `x-amz-copy-source-if-modified-since` condition evaluates to `true`; + /// + /// Amazon S3 returns `412 Precondition Failed` response code. String? get copySourceIfNoneMatch; /// Copies the object if it hasn't been modified since the specified time. + /// + /// If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request as follows: + /// + /// `x-amz-copy-source-if-match` condition evaluates to `true`, and; + /// + /// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`; + /// + /// Amazon S3 returns `200 OK` and copies the data. DateTime? get copySourceIfUnmodifiedSince; /// The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first 10 bytes of the source. You can copy a range only if the source object is greater than 5 MB. @@ -209,31 +252,45 @@ abstract class UploadPartCopyRequest /// Upload ID identifying the multipart upload whose part is being copied. String get uploadId; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, AES256). + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. This must be the same encryption key specified in the initiate multipart upload request. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported when the destination bucket is a directory bucket. String? get sseCustomerKeyMd5; - /// Specifies the algorithm to use when decrypting the source object (for example, AES256). + /// Specifies the algorithm to use when decrypting the source object (for example, `AES256`). + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created. + /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. 
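A hypothetical helper showing how the MD5 header values described above (`sseCustomerKeyMd5`, `copySourceSseCustomerKeyMd5`) are typically derived, assuming `package:crypto` for the digest:

```dart
import 'dart:convert';

import 'package:crypto/crypto.dart';

// Hypothetical helper: the header carries the base64-encoded, 128-bit MD5
// digest of the raw customer-provided key bytes, which Amazon S3 uses as an
// integrity check on the transmitted key.
String sseCustomerKeyMd5For(List<int> rawKeyBytes) =>
    base64.encode(md5.convert(rawKeyBytes).bytes);
```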
+ /// + /// This functionality is not supported when the source object is in a directory bucket. String? get copySourceSseCustomerKeyMd5; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected destination bucket owner. If the account ID that you provide does not match the actual owner of the destination bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? get expectedBucketOwner; - /// The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected source bucket owner. If the account ID that you provide does not match the actual owner of the source bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? 
get expectedSourceBucketOwner; @override String labelFor(String key) { @@ -251,6 +308,7 @@ abstract class UploadPartCopyRequest @override UploadPartCopyRequestPayload getPayload() => UploadPartCopyRequestPayload(); + @override List get props => [ bucket, @@ -273,6 +331,7 @@ abstract class UploadPartCopyRequest expectedBucketOwner, expectedSourceBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartCopyRequest') @@ -372,6 +431,7 @@ abstract class UploadPartCopyRequestPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartCopyRequestPayload'); @@ -391,6 +451,7 @@ class UploadPartCopyRequestRestXmlSerializer UploadPartCopyRequestPayload, _$UploadPartCopyRequestPayload, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -398,6 +459,7 @@ class UploadPartCopyRequestRestXmlSerializer shape: 'restXml', ) ]; + @override UploadPartCopyRequestPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_output.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_output.dart index fa7aff7b22..e9b5f67672 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_output.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_output.dart @@ -109,40 +109,53 @@ abstract class UploadPartOutput static const List<_i2.SmithySerializer> serializers = [UploadPartOutputRestXmlSerializer()]; - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, `AES256`, `aws:kms`). + /// + /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. ServerSideEncryption? get serverSideEncryption; /// Entity tag for the uploaded object. String? get eTag; - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32; - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. 
For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumCrc32C; - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha1; - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. + /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) in the _Amazon S3 User Guide_. String? get checksumSha256; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. + /// + /// This functionality is not supported for directory buckets. String? 
get sseCustomerAlgorithm; - /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. + /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key was used for the object. + /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. + /// + /// This functionality is not supported for directory buckets. String? get ssekmsKeyId; /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). + /// + /// This functionality is not supported for directory buckets. bool? get bucketKeyEnabled; /// If present, indicates that the requester was successfully charged for the request. + /// + /// This functionality is not supported for directory buckets. RequestCharged? get requestCharged; @override UploadPartOutputPayload getPayload() => UploadPartOutputPayload(); + @override List get props => [ serverSideEncryption, @@ -157,6 +170,7 @@ abstract class UploadPartOutput bucketKeyEnabled, requestCharged, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartOutput') @@ -222,6 +236,7 @@ abstract class UploadPartOutputPayload @override List get props => []; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartOutputPayload'); @@ -240,6 +255,7 @@ class UploadPartOutputRestXmlSerializer UploadPartOutputPayload, _$UploadPartOutputPayload, ]; + @override Iterable<_i2.ShapeId> get supportedProtocols => const [ _i2.ShapeId( @@ -247,6 +263,7 @@ class UploadPartOutputRestXmlSerializer shape: 'restXml', ) ]; + @override UploadPartOutputPayload deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_request.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_request.dart index fc0072fe2d..2e137c9bc8 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_request.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/model/upload_part_request.dart @@ -150,18 +150,24 @@ abstract class UploadPartRequest /// The name of the bucket to which the multipart upload was initiated. /// - /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. 
Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format `_bucket\_base\_name_--_az-id_--x-s3` (for example, `_DOC-EXAMPLE-BUCKET_--_usw2-az2_--x-s3`). For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the _Amazon S3 User Guide_. /// - /// When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. + /// **Access points** \- When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form _AccessPointName_-_AccountId_.s3-accesspoint._Region_.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see [Using access points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) in the _Amazon S3 User Guide_. + /// + /// Access points and Object Lambda access points are not supported by directory buckets. + /// + /// **S3 on Outposts** \- When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form `_AccessPointName_-_AccountId_._outpostID_.s3-outposts._Region_.amazonaws.com`. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see [What is S3 on Outposts?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the _Amazon S3 User Guide_. String get bucket; /// Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. _i4.Int64? get contentLength; /// The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified. + /// + /// This functionality is not supported for directory buckets. String? get contentMd5; - /// Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any additional functionality if not using the SDK. When sending this header, there must be a corresponding `x-amz-checksum` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. 
This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding `x-amz-checksum` or `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request with the HTTP status code `400 Bad Request`. For more information, see [Checking object integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) in the _Amazon S3 User Guide_. /// /// If you provide an individual checksum, Amazon S3 ignores any provided `ChecksumAlgorithm` parameter. /// @@ -189,19 +195,27 @@ abstract class UploadPartRequest /// Upload ID identifying the multipart upload whose part is being uploaded. String get uploadId; - /// Specifies the algorithm to use to when encrypting the object (for example, AES256). + /// Specifies the algorithm to use when encrypting the object (for example, AES256). + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerAlgorithm; /// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm header`. This must be the same encryption key specified in the initiate multipart upload request. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKey; /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + /// + /// This functionality is not supported for directory buckets. String? get sseCustomerKeyMd5; - /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. RequestPayer? get requestPayer; - /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code `403 Forbidden` (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code `403 Forbidden` (access denied). String? 
get expectedBucketOwner; @override String labelFor(String key) { @@ -219,6 +233,7 @@ abstract class UploadPartRequest @override _i2.Stream> getPayload() => body; + @override List get props => [ body, @@ -239,6 +254,7 @@ abstract class UploadPartRequest requestPayer, expectedBucketOwner, ]; + @override String toString() { final helper = newBuiltValueToStringHelper('UploadPartRequest') @@ -323,6 +339,7 @@ class UploadPartRequestRestXmlSerializer UploadPartRequest, _$UploadPartRequest, ]; + @override Iterable<_i1.ShapeId> get supportedProtocols => const [ _i1.ShapeId( @@ -330,6 +347,7 @@ class UploadPartRequestRestXmlSerializer shape: 'restXml', ) ]; + @override _i2.Stream> deserialize( Serializers serializers, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/abort_multipart_upload_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/abort_multipart_upload_operation.dart index c8a7203ad5..289a479e63 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/abort_multipart_upload_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/abort_multipart_upload_operation.dart @@ -15,11 +15,22 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. +/// This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. /// -/// To verify that all parts have been removed, so you don't get charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and ensure that the parts list is empty. +/// To verify that all parts have been removed and prevent getting charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) API operation and ensure that the parts list is empty. /// -/// For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. 
+/// +/// Permissions +/// +/// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `AbortMultipartUpload`: /// @@ -37,11 +48,22 @@ class AbortMultipartUploadOperation extends _i1.HttpOperation< AbortMultipartUploadRequest, AbortMultipartUploadOutputPayload, AbortMultipartUploadOutput> { - /// This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. + /// This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. + /// + /// To verify that all parts have been removed and prevent getting charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) API operation and ensure that the parts list is empty. + /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. 
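A minimal sketch of the abort-then-verify flow recommended above (abort the upload, then call `ListParts` and confirm the parts list is empty). The builder factories and the `s3` client call pattern are assumptions based on this SDK's generated style, not something this patch defines:

```dart
// Illustrative only: abort an in-progress multipart upload, then confirm no
// parts remain so no further storage is billed for them.
Future<void> abortAndVerify(
    S3Client s3, String bucket, String key, String uploadId) async {
  await s3
      .abortMultipartUpload(AbortMultipartUploadRequest.build((b) => b
        ..bucket = bucket
        ..key = key
        ..uploadId = uploadId))
      .result;
  final parts = await s3
      .listParts(ListPartsRequest.build((b) => b
        ..bucket = bucket
        ..key = key
        ..uploadId = uploadId))
      .result;
  assert(parts.parts?.isEmpty ?? true);
}
```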
+ /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// To verify that all parts have been removed, so you don't get charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and ensure that the parts list is empty. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `AbortMultipartUpload`: /// @@ -138,8 +160,10 @@ class AbortMultipartUploadOperation extends _i1.HttpOperation< input.uploadId, ); }); + @override int successCode([AbortMultipartUploadOutput? output]) => 204; + @override AbortMultipartUploadOutput buildOutput( AbortMultipartUploadOutputPayload payload, @@ -149,6 +173,7 @@ class AbortMultipartUploadOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const [ _i1.SmithyError( @@ -158,13 +183,17 @@ class AbortMultipartUploadOperation extends _i1.HttpOperation< ), _i1.ErrorKind.client, NoSuchUpload, + statusCode: 404, builder: NoSuchUpload.fromResponse, ) ]; + @override String get runtimeTypeName => 'AbortMultipartUpload'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? 
endpoint.uri; @@ -185,6 +214,7 @@ class AbortMultipartUploadOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( AbortMultipartUploadRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/complete_multipart_upload_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/complete_multipart_upload_operation.dart index 41f5a1ba08..a0b28f330d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/complete_multipart_upload_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/complete_multipart_upload_operation.dart @@ -17,44 +17,55 @@ import 'package:smithy_aws/smithy_aws.dart' as _i2; /// Completes a multipart upload by assembling previously uploaded parts. /// -/// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the `ETag` value, returned after that part was uploaded. +/// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation or the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. After successfully uploading all relevant parts of an upload, you call this `CompleteMultipartUpload` operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the `PartNumber` value and the `ETag` value that are returned after that part was uploaded. /// -/// Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a `200 OK` response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error). 
+/// The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a `200 OK` response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial `200 OK` response has been sent. This means that a `200 OK` response can contain either a success or an error. The error response might be embedded in the `200 OK` response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). /// /// Note that if `CompleteMultipartUpload` fails, applications should be prepared to retry the failed requests. For more information, see [Amazon S3 Error Best Practices](https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). /// -/// You cannot use `Content-Type: application/x-www-form-urlencoded` with Complete Multipart Upload requests. Also, if you do not provide a `Content-Type` header, `CompleteMultipartUpload` returns a 200 OK response. +/// You can't use `Content-Type: application/x-www-form-urlencoded` for the CompleteMultipartUpload requests. Also, if you don't provide a `Content-Type` header, `CompleteMultipartUpload` can still return a `200 OK` response. /// -/// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +/// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. /// -/// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// `CompleteMultipartUpload` has the following special errors: +/// Permissions /// -/// * Error code: `EntityTooSmall` +/// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. 
Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Special errors +/// +/// * Error Code: `EntityTooSmall` /// /// * Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. /// -/// * 400 Bad Request +/// * HTTP Status Code: 400 Bad Request /// -/// * Error code: `InvalidPart` +/// * Error Code: `InvalidPart` /// -/// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag. +/// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. /// -/// * 400 Bad Request +/// * HTTP Status Code: 400 Bad Request /// -/// * Error code: `InvalidPartOrder` +/// * Error Code: `InvalidPartOrder` /// /// * Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. /// -/// * 400 Bad Request +/// * HTTP Status Code: 400 Bad Request /// -/// * Error code: `NoSuchUpload` +/// * Error Code: `NoSuchUpload` /// /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// -/// * 404 Not Found +/// * HTTP Status Code: 404 Not Found +/// /// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CompleteMultipartUpload`: /// @@ -74,45 +85,56 @@ class CompleteMultipartUploadOperation extends _i1.HttpOperation< CompleteMultipartUploadOutput> { /// Completes a multipart upload by assembling previously uploaded parts. /// - /// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the `ETag` value, returned after that part was uploaded. + /// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation or the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. 
After successfully uploading all relevant parts of an upload, you call this `CompleteMultipartUpload` operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the `PartNumber` value and the `ETag` value that are returned after that part was uploaded. /// - /// Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a `200 OK` response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error). + /// The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a `200 OK` response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial `200 OK` response has been sent. This means that a `200 OK` response can contain either a success or an error. The error response might be embedded in the `200 OK` response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). /// /// Note that if `CompleteMultipartUpload` fails, applications should be prepared to retry the failed requests. For more information, see [Amazon S3 Error Best Practices](https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). /// - /// You cannot use `Content-Type: application/x-www-form-urlencoded` with Complete Multipart Upload requests. Also, if you do not provide a `Content-Type` header, `CompleteMultipartUpload` returns a 200 OK response. + /// You can't use `Content-Type: application/x-www-form-urlencoded` for the CompleteMultipartUpload requests. Also, if you don't provide a `Content-Type` header, `CompleteMultipartUpload` can still return a `200 OK` response. + /// + /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. 
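A minimal sketch of assembling the parts list that `CompleteMultipartUpload` requires, one `PartNumber`/`ETag` pair per uploaded part in ascending order; the factory signatures mirror this package's generated models and the `partETags` input is hypothetical:

```dart
// Illustrative only: `partETags` maps part numbers (1-based, ascending) to the
// ETag values returned when each part was uploaded.
CompleteMultipartUploadRequest buildCompleteRequest(
  String bucket,
  String key,
  String uploadId,
  Map<int, String> partETags,
) =>
    CompleteMultipartUploadRequest(
      bucket: bucket,
      key: key,
      uploadId: uploadId,
      multipartUpload: CompletedMultipartUpload(
        parts: [
          for (final entry in partETags.entries)
            CompletedPart(partNumber: entry.key, eTag: entry.value),
        ],
      ),
    );
```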
+ /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). /// - /// `CompleteMultipartUpload` has the following special errors: + /// Special errors /// - /// * Error code: `EntityTooSmall` + /// * Error Code: `EntityTooSmall` /// /// * Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `InvalidPart` + /// * Error Code: `InvalidPart` /// - /// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag. + /// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `InvalidPartOrder` + /// * Error Code: `InvalidPartOrder` /// /// * Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `NoSuchUpload` + /// * Error Code: `NoSuchUpload` /// /// * Description: The specified multipart upload does not exist. 
The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// - /// * 404 Not Found + /// * HTTP Status Code: 404 Not Found /// /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. + /// /// The following operations are related to `CompleteMultipartUpload`: /// /// * [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) @@ -249,8 +271,10 @@ class CompleteMultipartUploadOperation extends _i1.HttpOperation< input.uploadId, ); }); + @override int successCode([CompleteMultipartUploadOutput? output]) => 200; + @override CompleteMultipartUploadOutput buildOutput( CompleteMultipartUploadOutputPayload payload, @@ -260,12 +284,16 @@ class CompleteMultipartUploadOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'CompleteMultipartUpload'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -286,6 +314,7 @@ class CompleteMultipartUploadOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( CompleteMultipartUploadRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/copy_object_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/copy_object_operation.dart index b388e983d8..3b42ebbc32 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/copy_object_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/copy_object_operation.dart @@ -20,90 +20,64 @@ import 'package:smithy_aws/smithy_aws.dart' as _i2; /// /// You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). /// -/// All copy requests must be authenticated. Additionally, you must have _read_ access to the source object and _write_ access to the destination bucket. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. +/// You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. /// -/// A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the `200 OK` response. This means that a `200 OK` response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. 
The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error). +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// If the copy is successful, you receive a response with information about the copied object. +/// Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. /// -/// If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body. +/// Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a `400 Bad Request` error. For more information, see [Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). /// -/// The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/). -/// -/// Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 `Bad Request` error. For more information, see [Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -/// -/// Metadata -/// -/// When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -/// -/// To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the `x-amz-metadata-directive` header. When you grant permissions, you can use the `s3:x-amz-metadata-directive` condition key to enforce certain metadata behavior when objects are uploaded. For more information, see [Specifying Conditions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in the _Amazon S3 User Guide_. For a complete list of Amazon S3-specific condition keys, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). -/// -/// `x-amz-website-redirect-location` is unique to each object and must be specified in the request headers to copy the value. 
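To make the metadata-directive behavior described above concrete, here is a minimal sketch of a copy that replaces the source object's metadata, written against this package's generated S3 client. It assumes an already-configured `S3Client` named `client`, that the generated `CopyObjectRequest` factory accepts the named parameters shown (`bucket`, `key`, `copySource`, `metadataDirective`, `metadata`), and that the `MetadataDirective` enum exposes a `replace` value; none of these names are confirmed by this diff, and the bucket/key values are placeholders.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed export of the generated S3 SDK

/// Hypothetical sketch: copy `source.txt` to a new key and replace (rather
/// than copy) the object metadata, the equivalent of sending
/// `x-amz-metadata-directive: REPLACE` on the request.
Future<CopyObjectOutput> copyWithNewMetadata(S3Client client) {
  final request = CopyObjectRequest(
    bucket: 'example-destination-bucket', // placeholder bucket
    key: 'copies/source-copy.txt',
    copySource: 'example-source-bucket/source.txt',
    metadataDirective: MetadataDirective.replace,
    metadata: const {'reviewed': 'true'},
  );
  // The generated operation returns a SmithyOperation; `result` awaits the
  // parsed CopyObjectOutput.
  return client.copyObject(request).result;
}
```

If the default `COPY` directive is kept instead, the supplied `metadata` map is ignored and the source object's metadata is carried over unchanged.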
-/// -/// x-amz-copy-source-if Headers -/// -/// To only copy an object under certain conditions, such as whether the `Etag` matches or whether the object was modified before or after a specified date, use the following request parameters: -/// -/// * `x-amz-copy-source-if-match` -/// -/// * `x-amz-copy-source-if-none-match` +/// Authentication and authorization /// -/// * `x-amz-copy-source-if-unmodified-since` +/// All `CopyObject` requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). /// -/// * `x-amz-copy-source-if-modified-since` +/// **Directory buckets** \- You must use the IAM credentials to authenticate and authorize your access to the `CopyObject` API operation, instead of using the temporary security credentials through the `CreateSession` API operation. /// +/// Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf. /// -/// If both the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request and evaluate as follows, Amazon S3 returns `200 OK` and copies the data: +/// Permissions /// -/// * `x-amz-copy-source-if-match` condition evaluates to true +/// You must have _read_ access to the source object and _write_ access to the destination bucket. /// -/// * `x-amz-copy-source-if-unmodified-since` condition evaluates to false +/// * **General purpose bucket permissions** \- You must have permissions in an IAM policy based on the source and destination bucket types in a `CopyObject` operation. /// +/// * If the source object is in a general purpose bucket, you must have **`s3:GetObject`** permission to read the source object that is being copied. /// -/// If both the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request and evaluate as follows, Amazon S3 returns the `412 Precondition Failed` response code: +/// * If the destination bucket is a general purpose bucket, you must have **`s3:PutObject`** permission to write the object copy to the destination bucket. /// -/// * `x-amz-copy-source-if-none-match` condition evaluates to false +/// * **Directory bucket permissions** \- You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a `CopyObject` operation. /// -/// * `x-amz-copy-source-if-modified-since` condition evaluates to true +/// * If the source object that you want to copy is in a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to read the object. By default, the session is in the `ReadWrite` mode. If you want to restrict the access, you can explicitly set the `s3express:SessionMode` condition key to `ReadOnly` on the copy source bucket. /// +/// * If the copy destination is a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to write the object to the destination. The `s3express:SessionMode` condition key can't be set to `ReadOnly` on the copy destination bucket. /// -/// All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed.
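The conditional-copy rules listed above can be exercised from the same generated client. The sketch below is illustrative only: it assumes `copySourceIfMatch` and `copySourceIfUnmodifiedSince` are the request-model fields that map to the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers, and it reuses the assumed `S3Client` and factory shape from the previous example.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed export of the generated S3 SDK

/// Hypothetical sketch: copy the object only if its ETag still matches and it
/// has not been modified since the given cutoff. When both conditions hold,
/// Amazon S3 returns `200 OK` and copies the data; otherwise the copy fails
/// with a precondition error.
Future<void> conditionalCopy(S3Client client, String expectedETag) async {
  final request = CopyObjectRequest(
    bucket: 'example-destination-bucket', // placeholder bucket
    key: 'reports/latest.csv',
    copySource: 'example-source-bucket/reports/latest.csv',
    copySourceIfMatch: expectedETag, // x-amz-copy-source-if-match
    copySourceIfUnmodifiedSince: DateTime.utc(2024, 1, 1), // x-amz-copy-source-if-unmodified-since
  );
  await client.copyObject(request).result;
}
```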
/// -/// Server-side encryption +/// For example policies, see [Example bucket policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) in the _Amazon S3 User Guide_. /// -/// Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. /// -/// When you perform a `CopyObject` operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +/// Response and special errors /// -/// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the _Amazon S3 User Guide_. +/// When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the `Content-Length`. You always need to read the entire response body to check whether the copy succeeds. /// -/// Access Control List (ACL)-Specific Request Headers +/// * If the copy is successful, you receive a response with information about the copied object. /// -/// When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object.
For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +/// * A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A `200 OK` response can contain either a success or an error. /// -/// If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept `PUT` requests that don't specify an ACL or `PUT` requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. +/// * If the error occurs before the copy action starts, you receive a standard Amazon S3 error. /// -/// For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. +/// * If the error occurs during the copy operation, the error response is embedded in the `200 OK` response. For example, in a cross-region copy, you may encounter throttling and receive a `200 OK` response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3](repost.aws/knowledge-center/s3-resolve-200-internalerror). The `200 OK` status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a `200 OK` response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. /// -/// If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. +/// If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). /// -/// Checksums /// -/// When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the `x-amz-checksum-algorithm` header. +/// Charge /// -/// Storage Class Options -/// -/// You can use the `CopyObject` action to change the storage class of an object that is already stored in Amazon S3 by using the `StorageClass` parameter. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. -/// -/// If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). 
For more information, see [Copying Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). -/// -/// Versioning -/// -/// By default, `x-amz-copy-source` header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the `versionId` subresource. +/// The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/). /// -/// If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the `x-amz-version-id` response header in the response. +/// HTTP Host header syntax /// -/// If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null. +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CopyObject`: /// @@ -116,90 +90,64 @@ class CopyObjectOperation extends _i1.HttpOperation 200; + @override CopyObjectOutput buildOutput( CopyObjectResult? payload, @@ -483,6 +433,7 @@ class CopyObjectOperation extends _i1.HttpOperation get errorTypes => const [ _i1.SmithyError( @@ -492,13 +443,17 @@ class CopyObjectOperation extends _i1.HttpOperation 'CopyObject'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -519,6 +474,7 @@ class CopyObjectOperation extends _i1.HttpOperation _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( CopyObjectRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/create_multipart_upload_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/create_multipart_upload_operation.dart index 61849c3f13..d293d0ce4f 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/create_multipart_upload_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/create_multipart_upload_operation.dart @@ -14,121 +14,71 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also include this upload ID in the final request to either complete or abort the multipart upload request. +/// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). 
You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. /// -/// For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +/// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload. /// -/// If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +/// If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). /// -/// For information about the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +/// * **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. /// -/// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +/// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload. /// -/// Server-side encryption is for data encryption at rest. 
Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the request to initiate the upload by using `CreateMultipartUpload`. You can request that Amazon S3 save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C). +/// Request signing /// -/// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. +/// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the _Amazon S3 User Guide_. /// -/// If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role. 
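Because multipart upload is just a series of regular requests, the initiate, upload-part, and complete calls can be chained directly with the generated client. The sketch below is a rough outline under the same assumptions as the earlier examples (an existing `S3Client`, named-parameter factories on the generated request models, and the field names `uploadId`, `partNumber`, `body`, `multipartUpload`, `parts`, and `eTag`); a production upload would loop over parts of at least 5 MB and abort the upload on failure so the stored parts stop accruing charges.

```dart
import 'dart:convert';

import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed export of the generated S3 SDK

/// Hypothetical sketch of the initiate -> upload part -> complete sequence.
Future<void> multipartUploadFlow(S3Client client) async {
  const bucket = 'example-bucket'; // placeholder bucket
  const key = 'large-object.bin';

  // 1. Initiate the upload and capture the upload ID that ties the parts together.
  final created = await client
      .createMultipartUpload(
        CreateMultipartUploadRequest(bucket: bucket, key: key),
      )
      .result;
  final uploadId = created.uploadId!;

  // 2. Upload a single part (real uploads would loop over >= 5 MB parts).
  final part = await client
      .uploadPart(
        UploadPartRequest(
          bucket: bucket,
          key: key,
          uploadId: uploadId,
          partNumber: 1,
          body: Stream.value(utf8.encode('example part payload')),
        ),
      )
      .result;

  // 3. Complete the upload; until this (or an abort) succeeds, Amazon S3
  //    keeps charging for the stored parts.
  await client
      .completeMultipartUpload(
        CompleteMultipartUploadRequest(
          bucket: bucket,
          key: key,
          uploadId: uploadId,
          multipartUpload: CompletedMultipartUpload(
            parts: [CompletedPart(partNumber: 1, eTag: part.eTag)],
          ),
        ),
      )
      .result;
}
```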
+/// Permissions /// -/// For more information, see [Protecting Data Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +/// * **General purpose bucket permissions** \- For information about the permissions required to use the multipart upload API, see [Multipart upload and permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// -/// Access Permissions +/// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// -/// When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers: +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// -/// * Specify a canned ACL with the `x-amz-acl` request header. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). /// -/// * Specify access permissions explicitly with the `x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +/// Encryption /// +/// * **General purpose buckets** \- Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. 
By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the `CreateMultipartUpload` request. /// -/// You can use either a canned ACL or specify access permissions explicitly. You cannot do both. +/// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. /// -/// Server-Side- Encryption-Specific Request Headers +/// * `x-amz-server-side-encryption` /// -/// Amazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can request that Amazon S3 encrypts data at rest by using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys (SSE-C). +/// * `x-amz-server-side-encryption-aws-kms-key-id` /// -/// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. +/// * `x-amz-server-side-encryption-context` /// -/// * `x-amz-server-side-encryption` /// -/// * `x-amz-server-side-encryption-aws-kms-key-id` +/// * If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. /// -/// * `x-amz-server-side-encryption-context` +/// * To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. 
For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// +/// * If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. /// -/// If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. +/// * All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. /// -/// All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. /// -/// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). +/// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// -/// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. +/// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. /// -/// * `x-amz-server-side-encryption-customer-algorithm` +/// * `x-amz-server-side-encryption-customer-algorithm` /// -/// * `x-amz-server-side-encryption-customer-key` +/// * `x-amz-server-side-encryption-customer-key` /// -/// * `x-amz-server-side-encryption-customer-key-MD5` +/// * `x-amz-server-side-encryption-customer-key-MD5` /// /// -/// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). +/// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. /// +/// * **Directory buckets** -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. 
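As a small illustration of the encryption headers described above, the sketch below asks Amazon S3 to protect the uploaded parts with SSE-KMS when the multipart upload is created. It assumes the generated `CreateMultipartUploadRequest` exposes `serverSideEncryption` and `ssekmsKeyId` fields for the `x-amz-server-side-encryption` and `x-amz-server-side-encryption-aws-kms-key-id` headers, and that the `ServerSideEncryption` enum has an `awsKms` value; the bucket name and key ARN are placeholders.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed export of the generated S3 SDK

/// Hypothetical sketch: initiate a multipart upload whose parts are encrypted
/// with SSE-KMS instead of the bucket's default encryption configuration.
Future<String?> startEncryptedUpload(S3Client client) async {
  final output = await client
      .createMultipartUpload(
        CreateMultipartUploadRequest(
          bucket: 'example-bucket', // placeholder bucket
          key: 'encrypted/report.csv',
          // Maps to the x-amz-server-side-encryption header.
          serverSideEncryption: ServerSideEncryption.awsKms,
          // Maps to x-amz-server-side-encryption-aws-kms-key-id; if omitted,
          // Amazon S3 falls back to the Amazon Web Services managed aws/s3 key.
          ssekmsKeyId:
              'arn:aws:kms:us-east-1:111122223333:key/placeholder-key-id', // placeholder ARN
        ),
      )
      .result;
  // For SSE-C, the same customer-key headers would instead be supplied here
  // and again on every UploadPart/UploadPartCopy request.
  return output.uploadId;
}
```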
/// -/// Access-Control-List (ACL)-Specific Request Headers /// -/// You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With this operation, you can grant access permissions using one of the following two methods: -/// -/// * Specify a canned ACL (`x-amz-acl`) — Amazon S3 supports a set of predefined ACLs, known as _canned ACLs_. Each canned ACL has a predefined set of grantees and permissions. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -/// -/// * Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use: -/// -/// * `x-amz-grant-read` -/// -/// * `x-amz-grant-write` -/// -/// * `x-amz-grant-read-acp` -/// -/// * `x-amz-grant-write-acp` -/// -/// * `x-amz-grant-full-control` -/// -/// -/// You specify each grantee as a type=value pair, where the type is one of the following: -/// -/// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account -/// -/// * `uri` – if you are granting permissions to a predefined group -/// -/// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account -/// -/// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: -/// -/// * US East (N. Virginia) -/// -/// * US West (N. California) -/// -/// * US West (Oregon) -/// -/// * Asia Pacific (Singapore) -/// -/// * Asia Pacific (Sydney) -/// -/// * Asia Pacific (Tokyo) -/// -/// * Europe (Ireland) -/// -/// * South America (São Paulo) -/// -/// -/// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. -/// -/// -/// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: -/// -/// `x-amz-grant-read: id="11112222333", id="444455556666"` +/// HTTP Host header syntax /// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CreateMultipartUpload`: /// @@ -146,121 +96,71 @@ class CreateMultipartUploadOperation extends _i1.HttpOperation< CreateMultipartUploadRequest, CreateMultipartUploadOutputPayload, CreateMultipartUploadOutput> { - /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. 
You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also include this upload ID in the final request to either complete or abort the multipart upload request. - /// - /// For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). - /// - /// If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - /// - /// For information about the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). - /// - /// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). + /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. /// - /// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload. + /// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload. /// - /// Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. 
By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the request to initiate the upload by using `CreateMultipartUpload`. You can request that Amazon S3 save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C). + /// If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). /// - /// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. + /// * **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. /// - /// If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role. + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. 
For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// For more information, see [Protecting Data Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). /// - /// Access Permissions + /// Request signing /// - /// When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers: + /// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the _Amazon S3 User Guide_. /// - /// * Specify a canned ACL with the `x-amz-acl` request header. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + /// Permissions /// - /// * Specify access permissions explicitly with the `x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). + /// * **General purpose bucket permissions** \- For information about the permissions required to use the multipart upload API, see [Multipart upload and permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// + /// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// - /// You can use either a canned ACL or specify access permissions explicitly. You cannot do both. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. 
Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// Server-Side- Encryption-Specific Request Headers /// - /// Amazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can request that Amazon S3 encrypts data at rest by using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys (SSE-C). + /// Encryption /// - /// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. + /// * **General purpose buckets** \- Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the `CreateMultipartUpload` request. /// - /// * `x-amz-server-side-encryption` + /// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. 
/// - /// * `x-amz-server-side-encryption-aws-kms-key-id` + /// * `x-amz-server-side-encryption` /// - /// * `x-amz-server-side-encryption-context` + /// * `x-amz-server-side-encryption-aws-kms-key-id` /// + /// * `x-amz-server-side-encryption-context` /// - /// If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. /// - /// All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. + /// * If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. /// - /// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). + /// * To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// - /// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. + /// * If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. /// - /// * `x-amz-server-side-encryption-customer-algorithm` + /// * All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. /// - /// * `x-amz-server-side-encryption-customer-key` /// - /// * `x-amz-server-side-encryption-customer-key-MD5` + /// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// + /// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. 
/// - /// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). + /// * `x-amz-server-side-encryption-customer-algorithm` /// + /// * `x-amz-server-side-encryption-customer-key` /// - /// Access-Control-List (ACL)-Specific Request Headers + /// * `x-amz-server-side-encryption-customer-key-MD5` /// - /// You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With this operation, you can grant access permissions using one of the following two methods: /// - /// * Specify a canned ACL (`x-amz-acl`) — Amazon S3 supports a set of predefined ACLs, known as _canned ACLs_. Each canned ACL has a predefined set of grantees and permissions. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + /// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. /// - /// * Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use: + /// * **Directory buckets** -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// - /// * `x-amz-grant-read` /// - /// * `x-amz-grant-write` - /// - /// * `x-amz-grant-read-acp` - /// - /// * `x-amz-grant-write-acp` - /// - /// * `x-amz-grant-full-control` - /// - /// - /// You specify each grantee as a type=value pair, where the type is one of the following: - /// - /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account - /// - /// * `uri` – if you are granting permissions to a predefined group - /// - /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account - /// - /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: - /// - /// * US East (N. Virginia) - /// - /// * US West (N. 
California) - /// - /// * US West (Oregon) - /// - /// * Asia Pacific (Singapore) - /// - /// * Asia Pacific (Sydney) - /// - /// * Asia Pacific (Tokyo) - /// - /// * Europe (Ireland) - /// - /// * South America (São Paulo) - /// - /// - /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. - /// - /// - /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: - /// - /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CreateMultipartUpload`: /// @@ -482,8 +382,10 @@ class CreateMultipartUploadOperation extends _i1.HttpOperation< } } }); + @override int successCode([CreateMultipartUploadOutput? output]) => 200; + @override CreateMultipartUploadOutput buildOutput( CreateMultipartUploadOutputPayload payload, @@ -493,12 +395,16 @@ class CreateMultipartUploadOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'CreateMultipartUpload'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -519,6 +425,7 @@ class CreateMultipartUploadOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( CreateMultipartUploadRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_object_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_object_operation.dart index d427d7111a..4f55036361 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_object_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_object_operation.dart @@ -14,15 +14,42 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. +/// Removes an object from a bucket. The behavior depends on the bucket's versioning state: /// -/// To remove a specific version, you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, `x-amz-delete-marker`, to true. +/// * If versioning is enabled, the operation removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. /// -/// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. 
Requests that include `x-amz-mfa` must use HTTPS. +/// * If versioning is suspended or not enabled, the operation permanently deletes the object. /// -/// For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). /// -/// You can delete objects by explicitly calling DELETE Object or configure its lifecycle ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. +/// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. +/// +/// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. +/// +/// +/// To remove a specific version, you must use the `versionId` query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header `x-amz-delete-marker` to true. +/// +/// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. Requests that include `x-amz-mfa` must use HTTPS. For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) in the _Amazon S3 User Guide_. To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +/// +/// **Directory buckets** \- MFA delete is not supported by directory buckets. +/// +/// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. +/// +/// **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. +/// +/// Permissions +/// +/// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. +/// +/// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always have the `s3:DeleteObject` permission. 
+/// +/// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versiong-enabled bucket, you must have the `s3:DeleteObjectVersion` permission. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following action is related to `DeleteObject`: /// @@ -32,15 +59,42 @@ class DeleteObjectOperation extends _i1.HttpOperation< DeleteObjectRequest, DeleteObjectOutputPayload, DeleteObjectOutput> { - /// Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. + /// Removes an object from a bucket. The behavior depends on the bucket's versioning state: + /// + /// * If versioning is enabled, the operation removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. + /// + /// * If versioning is suspended or not enabled, the operation permanently deletes the object. + /// + /// + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. /// - /// To remove a specific version, you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, `x-amz-delete-marker`, to true. + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. 
Requests that include `x-amz-mfa` must use HTTPS. /// - /// For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). + /// To remove a specific version, you must use the `versionId` query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header `x-amz-delete-marker` to true. /// - /// You can delete objects by explicitly calling DELETE Object or configure its lifecycle ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. + /// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. Requests that include `x-amz-mfa` must use HTTPS. For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) in the _Amazon S3 User Guide_. To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). + /// + /// **Directory buckets** \- MFA delete is not supported by directory buckets. + /// + /// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. + /// + /// **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. + /// + /// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always have the `s3:DeleteObject` permission. + /// + /// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versioning-enabled bucket, you must have the `s3:DeleteObjectVersion` permission. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use.
Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following action is related to `DeleteObject`: /// @@ -137,8 +191,10 @@ class DeleteObjectOperation extends _i1.HttpOperation< ); } }); + @override int successCode([DeleteObjectOutput? output]) => 204; + @override DeleteObjectOutput buildOutput( DeleteObjectOutputPayload payload, @@ -148,12 +204,16 @@ class DeleteObjectOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'DeleteObject'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -174,6 +234,7 @@ class DeleteObjectOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( DeleteObjectRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_objects_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_objects_operation.dart index 8ca8030616..c63acc5567 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_objects_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/delete_objects_operation.dart @@ -15,15 +15,42 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead. +/// This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead. /// -/// The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. +/// The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. /// -/// The action supports two modes for the response: verbose and quiet. 
By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body. +/// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. /// -/// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +/// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. +/// +/// The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body. +/// +/// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) in the _Amazon S3 User Guide_. +/// +/// **Directory buckets** \- MFA delete is not supported by directory buckets. +/// +/// Permissions +/// +/// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. +/// +/// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always specify the `s3:DeleteObject` permission. +/// +/// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versioning-enabled bucket, you must specify the `s3:DeleteObjectVersion` permission. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization.
Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Content-MD5 request header +/// +/// * **General purpose bucket** \- The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. +/// +/// * **Directory bucket** \- The Content-MD5 request header or an additional checksum request header (including `x-amz-checksum-crc32`, `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or `x-amz-checksum-sha256`) is required for all Multi-Object Delete requests. +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `DeleteObjects`: /// @@ -38,15 +65,42 @@ import 'package:smithy_aws/smithy_aws.dart' as _i2; /// * [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) class DeleteObjectsOperation extends _i1.HttpOperation { - /// This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead. + /// This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead. + /// + /// The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. + /// + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. + /// + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// /// - /// The request contains a list of up to 1000 keys that you want to delete.
In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. + /// The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body. /// - /// The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body. + /// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) in the _Amazon S3 User Guide_. /// - /// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). + /// **Directory buckets** \- MFA delete is not supported by directory buckets. /// - /// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. + /// Permissions + /// + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. + /// + /// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always specify the `s3:DeleteObject` permission. + /// + /// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versioning-enabled bucket, you must specify the `s3:DeleteObjectVersion` permission. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization.
Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Content-MD5 request header + /// + /// * **General purpose bucket** \- The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. + /// + /// * **Directory bucket** \- The Content-MD5 request header or a additional checksum request header (including `x-amz-checksum-crc32`, `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or `x-amz-checksum-sha256`) is required for all Multi-Object Delete requests. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `DeleteObjects`: /// @@ -152,8 +206,10 @@ class DeleteObjectsOperation extends _i1.HttpOperation 200; + @override DeleteObjectsOutput buildOutput( DeleteObjectsOutputPayload payload, @@ -163,12 +219,16 @@ class DeleteObjectsOperation extends _i1.HttpOperation get errorTypes => const []; + @override String get runtimeTypeName => 'DeleteObjects'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -189,6 +249,7 @@ class DeleteObjectsOperation extends _i1.HttpOperation _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( DeleteObjectsRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/get_object_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/get_object_operation.dart index 258f4474f1..0b7b86f09a 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/get_object_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/get_object_operation.dart @@ -16,78 +16,69 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i4; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i3; -/// Retrieves objects from Amazon S3. To use `GET`, you must have `READ` access to the object. If you grant `READ` access to the anonymous user, you can return the object without using an authorization header. +/// Retrieves an object from Amazon S3. /// -/// An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object `sample.jpg`, you can name it `photos/2006/February/sample.jpg`. +/// In the `GetObject` request, specify the full key name for the object. /// -/// To get an object from such a logical hierarchy, specify the full key name for the object in the `GET` operation. 
For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the resource as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the resource as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +/// **General purpose buckets** \- Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the object key name as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the object key name as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) in the _Amazon S3 User Guide_. /// -/// For more information about returning the ACL of an object, see [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). +/// **Directory buckets** \- Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket--use1-az5--x-s3`, specify the object key name as `/photos/2006/February/sample.jpg`. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this action returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +/// Permissions /// -/// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. +/// * **General purpose bucket permissions** \- You must have the required permissions in a policy. To use `GetObject`, you must have the `READ` access to the object (or version). If you grant `READ` access to the anonymous user, the `GetObject` operation returns the object without using an authorization header. 
For more information, see [Specifying permissions in a policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// -/// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: +/// If you include a `versionId` in your request header, you must have the `s3:GetObjectVersion` permission to access a specific version of an object. The `s3:GetObject` permission is not required in this scenario. /// -/// * `x-amz-server-side-encryption-customer-algorithm` +/// If you request the current version of an object without a specific `versionId` in the request header, only the `s3:GetObject` permission is required. The `s3:GetObjectVersion` permission is not required in this scenario. /// -/// * `x-amz-server-side-encryption-customer-key` +/// If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// -/// * `x-amz-server-side-encryption-customer-key-MD5` +/// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// +/// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Access Denied` error. /// -/// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// -/// Assuming you have the relevant permission to read object tags, the response also returns the `x-amz-tagging-count` header that provides the count of number of tags associated with the object. You can use [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to retrieve the tag set associated with an object. /// -/// Permissions +/// Storage classes /// -/// You need the relevant read object (or version) permission for this operation. For more information, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. 
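As a rough, hedged illustration of the `GetObject` call and permissions described above, a minimal Dart sketch against the regenerated client might look like the following. The barrel import path, the `GetObjectRequest.build` factory, the `.result` future, and the output member names follow common Smithy Dart conventions and are assumptions here, not text taken from this patch.

```dart
// Minimal sketch, not part of the generated SDK: import path, request
// construction, and output member names are assumed from Smithy Dart usage.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> fetchObject(S3Client s3) async {
  // Requires s3:GetObject; setting ..versionId instead targets a specific
  // version and requires s3:GetObjectVersion.
  final request = GetObjectRequest.build(
    (b) => b
      ..bucket = 'examplebucket'
      ..key = 'photos/2006/February/sample.jpg',
  );
  final output = await s3.getObject(request).result;
  print('ETag: ${output.eTag}, bytes: ${output.contentLength}');
}
```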
+/// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. /// -/// If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. +/// **Directory buckets** \- For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code `400 Bad Request`. /// -/// If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. +/// Encryption /// -/// Versioning +/// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for the `GetObject` requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your `GetObject` requests for the object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. /// -/// By default, the `GET` action returns the current version of an object. To return a different version, use the `versionId` subresource. +/// Overriding response header values through the request /// -/// * If you supply a `versionId`, you need the `s3:GetObjectVersion` permission to access a specific version of an object. If you request a specific version, you do not need to have the `s3:GetObject` permission. If you request the current version without a specific version ID, only `s3:GetObject` permission is required. `s3:GetObjectVersion` permission won't be required. +/// There are times when you want to override certain response header values of a `GetObject` response. For example, you might override the `Content-Disposition` response header value through your `GetObject` request. /// -/// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. +/// You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code `200 OK` is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. /// +/// The response headers that you can override for the `GetObject` response are `Cache-Control`, `Content-Disposition`, `Content-Encoding`, `Content-Language`, `Content-Type`, and `Expires`. /// -/// For more information about versioning, see [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +/// To override values for a set of response headers in the `GetObject` response, you can use the following query parameters in the request. 
/// -/// Overriding Response Header Values +/// * `response-cache-control` /// -/// There are times when you want to override certain response header values in a `GET` response. For example, you might override the `Content-Disposition` response header value in your `GET` request. +/// * `response-content-disposition` /// -/// You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the `GET` response are `Content-Type`, `Content-Language`, `Expires`, `Cache-Control`, `Content-Disposition`, and `Content-Encoding`. To override these header values in the `GET` response, you use the following request parameters. +/// * `response-content-encoding` /// -/// You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request. +/// * `response-content-language` /// /// * `response-content-type` /// -/// * `response-content-language` -/// /// * `response-expires` /// -/// * `response-cache-control` /// -/// * `response-content-disposition` +/// When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. /// -/// * `response-content-encoding` -/// -/// -/// Overriding Response Header Values -/// -/// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: `If-Match` condition evaluates to `true`, and; `If-Unmodified-Since` condition evaluates to `false`; then, S3 returns 200 OK and the data requested. -/// -/// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: `If-None-Match` condition evaluates to `false`, and; `If-Modified-Since` condition evaluates to `true`; then, S3 returns 304 Not Modified response code. +/// HTTP Host header syntax /// -/// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `GetObject`: /// @@ -96,78 +87,69 @@ import 'package:smithy_aws/smithy_aws.dart' as _i3; /// * [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) class GetObjectOperation extends _i1.HttpOperation>, GetObjectOutput> { - /// Retrieves objects from Amazon S3. To use `GET`, you must have `READ` access to the object. If you grant `READ` access to the anonymous user, you can return the object without using an authorization header. + /// Retrieves an object from Amazon S3. /// - /// An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object `sample.jpg`, you can name it `photos/2006/February/sample.jpg`. + /// In the `GetObject` request, specify the full key name for the object. /// - /// To get an object from such a logical hierarchy, specify the full key name for the object in the `GET` operation. 
For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the resource as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the resource as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). + /// **General purpose buckets** \- Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the object key name as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the object key name as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) in the _Amazon S3 User Guide_. /// - /// For more information about returning the ACL of an object, see [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). + /// **Directory buckets** \- Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket--use1-az5--x-s3`, specify the object key name as `/photos/2006/February/sample.jpg`. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this action returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). + /// Permissions /// - /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. + /// * **General purpose bucket permissions** \- You must have the required permissions in a policy. To use `GetObject`, you must have the `READ` access to the object (or version). If you grant `READ` access to the anonymous user, the `GetObject` operation returns the object without using an authorization header. 
For more information, see [Specifying permissions in a policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// - /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: + /// If you include a `versionId` in your request header, you must have the `s3:GetObjectVersion` permission to access a specific version of an object. The `s3:GetObject` permission is not required in this scenario. /// - /// * `x-amz-server-side-encryption-customer-algorithm` + /// If you request the current version of an object without a specific `versionId` in the request header, only the `s3:GetObject` permission is required. The `s3:GetObjectVersion` permission is not required in this scenario. /// - /// * `x-amz-server-side-encryption-customer-key` + /// If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// - /// * `x-amz-server-side-encryption-customer-key-MD5` + /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// + /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Access Denied` error. /// - /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// Assuming you have the relevant permission to read object tags, the response also returns the `x-amz-tagging-count` header that provides the count of number of tags associated with the object. You can use [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to retrieve the tag set associated with an object. /// - /// Permissions + /// Storage classes /// - /// You need the relevant read object (or version) permission for this operation. For more information, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. 
+ /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. /// - /// If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. + /// **Directory buckets** \- For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code `400 Bad Request`. /// - /// If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. + /// Encryption /// - /// Versioning + /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for the `GetObject` requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your `GetObject` requests for the object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. /// - /// By default, the `GET` action returns the current version of an object. To return a different version, use the `versionId` subresource. + /// Overriding response header values through the request /// - /// * If you supply a `versionId`, you need the `s3:GetObjectVersion` permission to access a specific version of an object. If you request a specific version, you do not need to have the `s3:GetObject` permission. If you request the current version without a specific version ID, only `s3:GetObject` permission is required. `s3:GetObjectVersion` permission won't be required. + /// There are times when you want to override certain response header values of a `GetObject` response. For example, you might override the `Content-Disposition` response header value through your `GetObject` request. /// - /// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. + /// You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code `200 OK` is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. /// + /// The response headers that you can override for the `GetObject` response are `Cache-Control`, `Content-Disposition`, `Content-Encoding`, `Content-Language`, `Content-Type`, and `Expires`. /// - /// For more information about versioning, see [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). 
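The response-header overrides discussed in this documentation surface as members of the generated `GetObjectRequest`. The hedged sketch below assumes the usual camelCase member names (for example `responseContentType` and `responseContentDisposition`); verify them against the regenerated model before relying on them.

```dart
// Sketch only: the response-override member names are assumed from the usual
// camelCase mapping of the query parameters, not confirmed by this patch.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> fetchAsAttachment(S3Client s3) async {
  final request = GetObjectRequest.build(
    (b) => b
      ..bucket = 'examplebucket'
      ..key = 'reports/latest.csv'
      // Overrides are echoed back only on a 200 OK response and require a
      // signed request (SigV4 Authorization header or presigned URL).
      ..responseContentType = 'text/csv'
      ..responseContentDisposition = 'attachment; filename="latest.csv"',
  );
  final output = await s3.getObject(request).result;
  print('Content-Type returned: ${output.contentType}');
}
```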
+ /// To override values for a set of response headers in the `GetObject` response, you can use the following query parameters in the request. /// - /// Overriding Response Header Values + /// * `response-cache-control` /// - /// There are times when you want to override certain response header values in a `GET` response. For example, you might override the `Content-Disposition` response header value in your `GET` request. + /// * `response-content-disposition` /// - /// You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the `GET` response are `Content-Type`, `Content-Language`, `Expires`, `Cache-Control`, `Content-Disposition`, and `Content-Encoding`. To override these header values in the `GET` response, you use the following request parameters. + /// * `response-content-encoding` /// - /// You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request. + /// * `response-content-language` /// /// * `response-content-type` /// - /// * `response-content-language` - /// /// * `response-expires` /// - /// * `response-cache-control` - /// - /// * `response-content-disposition` - /// - /// * `response-content-encoding` - /// - /// - /// Overriding Response Header Values /// - /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: `If-Match` condition evaluates to `true`, and; `If-Unmodified-Since` condition evaluates to `false`; then, S3 returns 200 OK and the data requested. + /// When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. /// - /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: `If-None-Match` condition evaluates to `false`, and; `If-Modified-Since` condition evaluates to `true`; then, S3 returns 304 Not Modified response code. + /// HTTP Host header syntax /// - /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `GetObject`: /// @@ -349,8 +331,10 @@ class GetObjectOperation extends _i1.HttpOperation 200; + @override GetObjectOutput buildOutput( _i2.Stream> payload, @@ -360,6 +344,7 @@ class GetObjectOperation extends _i1.HttpOperation get errorTypes => const [ _i1.SmithyError( @@ -369,6 +354,7 @@ class GetObjectOperation extends _i1.HttpOperation( @@ -378,13 +364,17 @@ class GetObjectOperation extends _i1.HttpOperation 'GetObject'; + @override _i3.AWSRetryer get retryer => _i3.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? 
endpoint.uri; @@ -405,6 +395,7 @@ class GetObjectOperation extends _i1.HttpOperation _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( GetObjectRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/head_object_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/head_object_operation.dart index fe2964f5e3..2bb0b611f2 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/head_object_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/head_object_operation.dart @@ -15,59 +15,59 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// The `HEAD` action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use `HEAD`, you must have READ access to the object. +/// The `HEAD` operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. /// -/// A `HEAD` request has the same options as a `GET` action on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic `400 Bad Request`, `403 Forbidden` or `404 Not Found` code. It is not possible to retrieve the exact exception beyond these error codes. +/// A `HEAD` request has the same options as a `GET` operation on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic code, such as `400 Bad Request`, `403 Forbidden`, `404 Not Found`, `405 Method Not Allowed`, `412 Precondition Failed`, or `304 Not Modified`. It's not possible to retrieve the exact exception of these error codes. /// -/// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers: -/// -/// * `x-amz-server-side-encryption-customer-algorithm` +/// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// -/// * `x-amz-server-side-encryption-customer-key` +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// * `x-amz-server-side-encryption-customer-key-MD5` +/// Permissions /// +/// * **General purpose bucket permissions** \- To use `HEAD`, you must have the `s3:GetObject` permission. You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) in the _Amazon S3 User Guide_. 
/// -/// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +/// If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// -/// * Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `GET` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. +/// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// -/// * The last modified property in this case is the creation date of the object. +/// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Forbidden` error. /// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// -/// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// -/// Consider the following when using request headers: +/// Encryption /// -/// * Consideration 1 – If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: +/// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `HEAD` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The `x-amz-server-side-encryption` header is used when you `PUT` an object to S3 and want to specify the encryption method. If you include this header in a `HEAD` request for an object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. It's because the encryption method can't be changed when you retrieve the object. 
/// -/// * `If-Match` condition evaluates to `true`, and; +/// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: /// -/// * `If-Unmodified-Since` condition evaluates to `false`; +/// * `x-amz-server-side-encryption-customer-algorithm` /// +/// * `x-amz-server-side-encryption-customer-key` /// -/// Then Amazon S3 returns `200 OK` and the data requested. +/// * `x-amz-server-side-encryption-customer-key-MD5` /// -/// * Consideration 2 – If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: /// -/// * `If-None-Match` condition evaluates to `false`, and; +/// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. /// -/// * `If-Modified-Since` condition evaluates to `true`; +/// **Directory bucket permissions** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// +/// Versioning /// -/// Then Amazon S3 returns the `304 Not Modified` response code. +/// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. /// +/// * If the specified version is a delete marker, the response returns a `405 Method Not Allowed` error and the `Last-Modified: timestamp` response header. /// -/// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). /// -/// Permissions +/// * **Directory buckets** \- Delete marker is not supported by directory buckets. /// -/// You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. +/// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. /// -/// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 error. /// -/// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 error. +/// HTTP Host header syntax /// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following actions are related to `HeadObject`: /// @@ -76,59 +76,59 @@ import 'package:smithy_aws/smithy_aws.dart' as _i2; /// * [GetObjectAttributes](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) class HeadObjectOperation extends _i1.HttpOperation { - /// The `HEAD` action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use `HEAD`, you must have READ access to the object. 
- /// - /// A `HEAD` request has the same options as a `GET` action on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic `400 Bad Request`, `403 Forbidden` or `404 Not Found` code. It is not possible to retrieve the exact exception beyond these error codes. + /// The `HEAD` operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. /// - /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers: + /// A `HEAD` request has the same options as a `GET` operation on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic code, such as `400 Bad Request`, `403 Forbidden`, `404 Not Found`, `405 Method Not Allowed`, `412 Precondition Failed`, or `304 Not Modified`. It's not possible to retrieve the exact exception of these error codes. /// - /// * `x-amz-server-side-encryption-customer-algorithm` + /// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// - /// * `x-amz-server-side-encryption-customer-key` + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// * `x-amz-server-side-encryption-customer-key-MD5` + /// Permissions /// + /// * **General purpose bucket permissions** \- To use `HEAD`, you must have the `s3:GetObject` permission. You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) in the _Amazon S3 User Guide_. /// - /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + /// If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// - /// * Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `GET` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. + /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// - /// * The last modified property in this case is the creation date of the object. 
+ /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Forbidden` error. /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// - /// Consider the following when using request headers: + /// Encryption /// - /// * Consideration 1 – If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: + /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `HEAD` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The `x-amz-server-side-encryption` header is used when you `PUT` an object to S3 and want to specify the encryption method. If you include this header in a `HEAD` request for an object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. It's because the encryption method can't be changed when you retrieve the object. /// - /// * `If-Match` condition evaluates to `true`, and; + /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: /// - /// * `If-Unmodified-Since` condition evaluates to `false`; + /// * `x-amz-server-side-encryption-customer-algorithm` /// + /// * `x-amz-server-side-encryption-customer-key` /// - /// Then Amazon S3 returns `200 OK` and the data requested. + /// * `x-amz-server-side-encryption-customer-key-MD5` /// - /// * Consideration 2 – If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: /// - /// * `If-None-Match` condition evaluates to `false`, and; + /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. 
/// - /// * `If-Modified-Since` condition evaluates to `true`; + /// **Directory bucket permissions** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// + /// Versioning /// - /// Then Amazon S3 returns the `304 Not Modified` response code. + /// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. /// + /// * If the specified version is a delete marker, the response returns a `405 Method Not Allowed` error and the `Last-Modified: timestamp` response header. /// - /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). /// - /// Permissions + /// * **Directory buckets** \- Delete marker is not supported by directory buckets. /// - /// You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. /// - /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 error. /// - /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 error. + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following actions are related to `HeadObject`: /// @@ -269,8 +269,10 @@ class HeadObjectOperation extends _i1.HttpOperation 200; + @override HeadObjectOutput buildOutput( HeadObjectOutputPayload payload, @@ -280,6 +282,7 @@ class HeadObjectOperation extends _i1.HttpOperation get errorTypes => const [ _i1.SmithyError( @@ -292,10 +295,13 @@ class HeadObjectOperation extends _i1.HttpOperation 'HeadObject'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -316,6 +322,7 @@ class HeadObjectOperation extends _i1.HttpOperation _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( HeadObjectRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_multipart_uploads_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_multipart_uploads_operation.dart index f14ad0e1e0..870ac6f9e4 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_multipart_uploads_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_multipart_uploads_operation.dart @@ -14,15 +14,39 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted. 
+/// This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the `CreateMultipartUpload` request, but has not yet been completed or aborted. /// -/// This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an `IsTruncated` element with the value true. To list the additional multipart uploads, use the `key-marker` and `upload-id-marker` request parameters. +/// **Directory buckets** \- If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. /// -/// In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time. +/// The `ListMultipartUploads` operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` request parameter. If there are more than 1,000 multipart uploads that satisfy your `ListMultipartUploads` request, the response returns an `IsTruncated` element with the value of `true`, a `NextKeyMarker` element, and a `NextUploadIdMarker` element. To list the remaining multipart uploads, you need to make subsequent `ListMultipartUploads` requests. In these requests, include two query parameters: `key-marker` and `upload-id-marker`. Set the value of `key-marker` to the `NextKeyMarker` value from the previous response. Similarly, set the value of `upload-id-marker` to the `NextUploadIdMarker` value from the previous response. /// -/// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +/// **Directory buckets** \- The `upload-id-marker` element and the `NextUploadIdMarker` element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of `key-marker` to the `NextKeyMarker` value from the previous response. /// -/// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +/// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. +/// +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. 
+/// +/// Permissions +/// +/// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Sorting of multipart uploads in response +/// +/// * **General purpose bucket** \- In the `ListMultipartUploads` response, the multipart uploads are sorted based on two criteria: +/// +/// * Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys. +/// +/// * Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later. +/// +/// * **Directory bucket** \- In the `ListMultipartUploads` response, the multipart uploads aren't sorted lexicographically based on the object keys. +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListMultipartUploads`: /// @@ -40,15 +64,39 @@ class ListMultipartUploadsOperation extends _i1.HttpOperation< ListMultipartUploadsRequest, ListMultipartUploadsOutputPayload, ListMultipartUploadsOutput> { - /// This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted. + /// This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the `CreateMultipartUpload` request, but has not yet been completed or aborted. + /// + /// **Directory buckets** \- If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. + /// + /// The `ListMultipartUploads` operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` request parameter. 
If there are more than 1,000 multipart uploads that satisfy your `ListMultipartUploads` request, the response returns an `IsTruncated` element with the value of `true`, a `NextKeyMarker` element, and a `NextUploadIdMarker` element. To list the remaining multipart uploads, you need to make subsequent `ListMultipartUploads` requests. In these requests, include two query parameters: `key-marker` and `upload-id-marker`. Set the value of `key-marker` to the `NextKeyMarker` value from the previous response. Similarly, set the value of `upload-id-marker` to the `NextUploadIdMarker` value from the previous response. + /// + /// **Directory buckets** \- The `upload-id-marker` element and the `NextUploadIdMarker` element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of `key-marker` to the `NextKeyMarker` value from the previous response. + /// + /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. /// - /// This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an `IsTruncated` element with the value true. To list the additional multipart uploads, use the `key-marker` and `upload-id-marker` request parameters. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time. + /// Permissions /// - /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. 
Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Sorting of multipart uploads in response + /// + /// * **General purpose bucket** \- In the `ListMultipartUploads` response, the multipart uploads are sorted based on two criteria: + /// + /// * Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys. + /// + /// * Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later. + /// + /// * **Directory bucket** \- In the `ListMultipartUploads` response, the multipart uploads aren't sorted lexicographically based on the object keys. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListMultipartUploads`: /// @@ -176,8 +224,10 @@ class ListMultipartUploadsOperation extends _i1.HttpOperation< ); } }); + @override int successCode([ListMultipartUploadsOutput? output]) => 200; + @override ListMultipartUploadsOutput buildOutput( ListMultipartUploadsOutputPayload payload, @@ -187,12 +237,16 @@ class ListMultipartUploadsOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'ListMultipartUploads'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -213,6 +267,7 @@ class ListMultipartUploadsOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( ListMultipartUploadsRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_objects_v2_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_objects_v2_operation.dart index 356fcc9a0c..c94fdb66a4 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_objects_v2_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_objects_v2_operation.dart @@ -15,15 +15,29 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list. 
For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. +/// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). /// -/// To use this operation, you must have READ access to the bucket. +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// To use this action in an Identity and Access Management (IAM) policy, you must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. +/// Permissions /// -/// This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +/// * **General purpose bucket permissions** \- To use this operation, you must have READ access to the bucket. You must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. 
Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Sorting order of returned objects +/// +/// * **General purpose bucket** \- For general purpose buckets, `ListObjectsV2` returns objects in lexicographical order based on their key names. +/// +/// * **Directory bucket** \- For directory buckets, `ListObjectsV2` does not return objects in lexicographical order. +/// +/// +/// HTTP Host header syntax /// -/// To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. +/// +/// This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). /// /// The following operations are related to `ListObjectsV2`: /// @@ -40,15 +54,29 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< String, int, ListObjectsV2Output> { - /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list. For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. + /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). /// - /// To use this operation, you must have READ access to the bucket. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. 
For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// To use this action in an Identity and Access Management (IAM) policy, you must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. + /// Permissions /// - /// This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). + /// * **General purpose bucket permissions** \- To use this operation, you must have READ access to the bucket. You must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Sorting order of returned objects + /// + /// * **General purpose bucket** \- For general purpose buckets, `ListObjectsV2` returns objects in lexicographical order based on their key names. /// - /// To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). + /// * **Directory bucket** \- For directory buckets, `ListObjectsV2` does not return objects in lexicographical order. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. + /// + /// This section describes the latest revision of this action. 
We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). /// /// The following operations are related to `ListObjectsV2`: /// @@ -185,8 +213,10 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< ); } }); + @override int successCode([ListObjectsV2Output? output]) => 200; + @override ListObjectsV2Output buildOutput( ListObjectsV2OutputPayload payload, @@ -196,6 +226,7 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const [ _i1.SmithyError( @@ -205,13 +236,17 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< ), _i1.ErrorKind.client, NoSuchBucket, + statusCode: 404, builder: NoSuchBucket.fromResponse, ) ]; + @override String get runtimeTypeName => 'ListObjectsV2'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -232,6 +267,7 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( ListObjectsV2Request input, { @@ -253,8 +289,10 @@ class ListObjectsV2Operation extends _i1.PaginatedHttpOperation< @override String? getToken(ListObjectsV2Output output) => output.nextContinuationToken; + @override ListObjectsV2Output getItems(ListObjectsV2Output output) => output; + @override ListObjectsV2Request rebuildInput( ListObjectsV2Request input, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_parts_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_parts_operation.dart index d7e8dd1e6b..ea7ee5f29b 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_parts_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/list_parts_operation.dart @@ -16,13 +16,28 @@ import 'package:built_collection/built_collection.dart' as _i2; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i3; -/// Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of true, and a `NextPartNumberMarker` element. In subsequent `ListParts` requests you can include the part-number-marker query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. +/// Lists the parts that have been uploaded for a specific multipart upload. /// -/// If the upload was created using a checksum algorithm, you will need to have permission to the `kms:Decrypt` action for the request to succeed. +/// To use this operation, you must provide the `upload ID` in the request. 
You obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). /// -/// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +/// The `ListParts` request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of `true`, and a `NextPartNumberMarker` element. To list remaining uploaded parts, in subsequent `ListParts` requests, include the `part-number-marker` query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. /// -/// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +/// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. +/// +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. +/// +/// Permissions +/// +/// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// +/// If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the `kms:Decrypt` action for the `ListParts` request to succeed. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . 
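Aside for reviewers (not part of the regenerated sources): the `ListParts` pagination contract described above — `max-parts`, `IsTruncated`, and `NextPartNumberMarker` — can also be exercised manually. The sketch below is a minimal illustration only; the `S3Client.listParts(...).result` call shape, the built_value request builder, and the camelCase field names (`partNumberMarker`, `maxParts`, `nextPartNumberMarker`, `isTruncated`) are assumptions inferred from the Smithy member names in this diff, not verified API.

```dart
// Illustrative sketch only; not part of the regenerated diff.
// Assumed import of the generated SDK barrel (path not verified):
// import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> listAllParts(
  S3Client s3,
  String bucket,
  String key,
  String uploadId,
) async {
  String? marker; // NextPartNumberMarker from the previous page, if any.
  do {
    // Assumes the generated client exposes the operation result via `.result`.
    final output = await s3
        .listParts(ListPartsRequest((b) => b
          ..bucket = bucket
          ..key = key
          ..uploadId = uploadId
          ..maxParts = 1000
          ..partNumberMarker = marker))
        .result;
    final parts = output.parts;
    if (parts != null) {
      for (final part in parts) {
        print('part ${part.partNumber}: ${part.eTag}');
      }
    }
    // Continue from NextPartNumberMarker only while the listing is truncated.
    marker =
        (output.isTruncated ?? false) ? output.nextPartNumberMarker : null;
  } while (marker != null);
}
```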
+/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListParts`: /// @@ -45,13 +60,28 @@ class ListPartsOperation extends _i1.PaginatedHttpOperation< String, int, _i2.BuiltList> { - /// Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of true, and a `NextPartNumberMarker` element. In subsequent `ListParts` requests you can include the part-number-marker query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. + /// Lists the parts that have been uploaded for a specific multipart upload. + /// + /// To use this operation, you must provide the `upload ID` in the request. You obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). + /// + /// The `ListParts` request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of `true`, and a `NextPartNumberMarker` element. To list remaining uploaded parts, in subsequent `ListParts` requests, include the `part-number-marker` query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. + /// + /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions /// - /// If the upload was created using a checksum algorithm, you will need to have permission to the `kms:Decrypt` action for the request to succeed. + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
+ /// If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the `kms:Decrypt` action for the `ListParts` request to succeed. /// - /// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListParts`: /// @@ -176,8 +206,10 @@ class ListPartsOperation extends _i1.PaginatedHttpOperation< input.uploadId, ); }); + @override int successCode([ListPartsOutput? output]) => 200; + @override ListPartsOutput buildOutput( ListPartsOutputPayload payload, @@ -187,12 +219,16 @@ class ListPartsOperation extends _i1.PaginatedHttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'ListParts'; + @override _i3.AWSRetryer get retryer => _i3.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -213,6 +249,7 @@ class ListPartsOperation extends _i1.PaginatedHttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( ListPartsRequest input, { @@ -234,9 +271,11 @@ class ListPartsOperation extends _i1.PaginatedHttpOperation< @override String? getToken(ListPartsOutput output) => output.nextPartNumberMarker; + @override _i2.BuiltList getItems(ListPartsOutput output) => output.parts ?? _i2.BuiltList(); + @override ListPartsRequest rebuildInput( ListPartsRequest input, diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/put_object_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/put_object_operation.dart index cfbd0eb2cc..24cbaf921d 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/put_object_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/put_object_operation.dart @@ -14,34 +14,49 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i4; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i3; -/// Adds an object to a bucket. 
You must have WRITE permissions on a bucket to add an object to it. +/// Adds an object to a bucket. /// -/// Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. +/// * Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. /// -/// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). +/// * If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner. /// -/// To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. +/// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// * To successfully complete the `PutObject` request, you must have the `s3:PutObject` in your IAM permissions. /// -/// * To successfully change the objects acl of your `PutObject` request, you must have the `s3:PutObjectAcl` in your IAM permissions. +/// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior: /// -/// * To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging` in your IAM permissions. +/// * **S3 Object Lock** \- To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) in the _Amazon S3 User Guide_. /// -/// * The `Content-MD5` header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in the _Amazon S3 User Guide_. +/// This functionality is not supported for directory buckets. 
/// +/// * **S3 Versioning** \- When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) in the _Amazon S3 User Guide_. For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). /// -/// You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +/// This functionality is not supported for directory buckets. /// -/// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). /// -/// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a `400` error with the error code `AccessControlListNotSupported`. For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. +/// Permissions /// -/// If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. +/// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `PutObject` request includes specific headers. /// -/// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. 
Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. +/// * **`s3:PutObject`** \- To successfully complete the `PutObject` request, you must always have the `s3:PutObject` permission on a bucket to add an object to it. /// -/// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +/// * **`s3:PutObjectAcl`** \- To successfully change the objects ACL of your `PutObject` request, you must have the `s3:PutObjectAcl`. +/// +/// * **`s3:PutObjectTagging`** \- To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging`. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Data integrity with Content-MD5 +/// +/// * **General purpose bucket** \- To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. +/// +/// * **Directory bucket** \- This functionality is not supported for directory buckets. +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// For more information about related Amazon S3 APIs, see the following: /// @@ -50,34 +65,49 @@ import 'package:smithy_aws/smithy_aws.dart' as _i3; /// * [DeleteObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) class PutObjectOperation extends _i1.HttpOperation<_i2.Stream>, PutObjectRequest, PutObjectOutputPayload, PutObjectOutput> { - /// Adds an object to a bucket. 
You must have WRITE permissions on a bucket to add an object to it. + /// Adds an object to a bucket. + /// + /// * Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. + /// + /// * If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner. + /// + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// /// - /// Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. + /// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior: /// - /// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). + /// * **S3 Object Lock** \- To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) in the _Amazon S3 User Guide_. /// - /// To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. + /// This functionality is not supported for directory buckets. /// - /// * To successfully complete the `PutObject` request, you must have the `s3:PutObject` in your IAM permissions. + /// * **S3 Versioning** \- When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) in the _Amazon S3 User Guide_. 
For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). /// - /// * To successfully change the objects acl of your `PutObject` request, you must have the `s3:PutObjectAcl` in your IAM permissions. + /// This functionality is not supported for directory buckets. /// - /// * To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging` in your IAM permissions. /// - /// * The `Content-MD5` header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in the _Amazon S3 User Guide_. + /// Permissions /// + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `PutObject` request includes specific headers. /// - /// You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). + /// * **`s3:PutObject`** \- To successfully complete the `PutObject` request, you must always have the `s3:PutObject` permission on a bucket to add an object to it. /// - /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). + /// * **`s3:PutObjectAcl`** \- To successfully change the objects ACL of your `PutObject` request, you must have the `s3:PutObjectAcl`. /// - /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a `400` error with the error code `AccessControlListNotSupported`. For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. + /// * **`s3:PutObjectTagging`** \- To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging`. 
/// - /// If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. /// - /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + /// Data integrity with Content-MD5 + /// + /// * **General purpose bucket** \- To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. + /// + /// * **Directory bucket** \- This functionality is not supported for directory buckets. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// For more information about related Amazon S3 APIs, see the following: /// @@ -321,8 +351,10 @@ class PutObjectOperation extends _i1.HttpOperation<_i2.Stream>, .add(_i3.WithChecksum(input.checksumAlgorithm!.value)); } }); + @override int successCode([PutObjectOutput? 
output]) => 200; + @override PutObjectOutput buildOutput( PutObjectOutputPayload payload, @@ -332,12 +364,16 @@ class PutObjectOperation extends _i1.HttpOperation<_i2.Stream>, payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'PutObject'; + @override _i3.AWSRetryer get retryer => _i3.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -358,6 +394,7 @@ class PutObjectOperation extends _i1.HttpOperation<_i2.Stream>, @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( PutObjectRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/select_object_content_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/select_object_content_operation.dart index 6494508807..79c0ca40f9 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/select_object_content_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/select_object_content_operation.dart @@ -15,15 +15,17 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; +/// This operation is not supported by directory buckets. +/// /// This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. /// -/// This action is not supported by Amazon S3 on Outposts. +/// This functionality is not supported for Amazon S3 on Outposts. /// /// For more information about Amazon S3 Select, see [Selecting Content from Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) and [SELECT Command](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) in the _Amazon S3 User Guide_. /// /// Permissions /// -/// You must have `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. +/// You must have the `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// /// Object Data Formats /// @@ -71,15 +73,17 @@ class SelectObjectContentOperation extends _i1.HttpOperation< SelectObjectContentRequest, SelectObjectContentEventStream, SelectObjectContentOutput> { + /// This operation is not supported by directory buckets. + /// /// This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. 
Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. /// - /// This action is not supported by Amazon S3 on Outposts. + /// This functionality is not supported for Amazon S3 on Outposts. /// /// For more information about Amazon S3 Select, see [Selecting Content from Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) and [SELECT Command](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) in the _Amazon S3 User Guide_. /// /// Permissions /// - /// You must have `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. + /// You must have the `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// /// Object Data Formats /// @@ -218,8 +222,10 @@ class SelectObjectContentOperation extends _i1.HttpOperation< } } }); + @override int successCode([SelectObjectContentOutput? output]) => 200; + @override SelectObjectContentOutput buildOutput( SelectObjectContentEventStream? payload, @@ -229,12 +235,16 @@ class SelectObjectContentOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'SelectObjectContent'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? endpoint.uri; @@ -255,6 +265,7 @@ class SelectObjectContentOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( SelectObjectContentRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_copy_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_copy_operation.dart index dc19cbcf41..ff3bf06e00 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_copy_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_copy_operation.dart @@ -15,66 +15,74 @@ import 'package:aws_signature_v4/aws_signature_v4.dart' as _i3; import 'package:smithy/smithy.dart' as _i1; import 'package:smithy_aws/smithy_aws.dart' as _i2; -/// Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header `x-amz-copy-source` in your request and a byte range by adding the request header `x-amz-copy-source-range` in your request. +/// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header `x-amz-copy-source` in your request. To specify a byte range, you add the request header `x-amz-copy-source-range` in your request. 
/// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. /// -/// Instead of using an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and provide data in your request. +/// Instead of copying data from an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action to upload new data as a part of an object in your request. /// -/// You must initiate a multipart upload before you can upload any part. In response to your initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request. +/// You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. /// -/// For more information about using the `UploadPartCopy` operation, see the following: +/// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. /// -/// * For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// * For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// Authentication and authorization /// -/// * For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. +/// All `UploadPartCopy` requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). /// -/// * For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). 
+/// **Directory buckets** \- You must use IAM credentials to authenticate and authorize your access to the `UploadPartCopy` API operation, instead of using the temporary security credentials through the `CreateSession` API operation. /// +/// Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. /// -/// Note the following additional considerations about the request headers `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, and `x-amz-copy-source-if-modified-since`: +/// Permissions /// -/// * **Consideration 1** \- If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request as follows: +/// You must have `READ` access to the source object and `WRITE` access to the destination bucket. /// -/// `x-amz-copy-source-if-match` condition evaluates to `true`, and; +/// * **General purpose bucket permissions** \- You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an `UploadPartCopy` operation. /// -/// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`; +/// * If the source object is in a general purpose bucket, you must have the **`s3:GetObject`** permission to read the source object that is being copied. /// -/// Amazon S3 returns `200 OK` and copies the data. +/// * If the destination bucket is a general purpose bucket, you must have the **`s3:PutObject`** permission to write the object copy to the destination bucket. /// -/// * **Consideration 2** \- If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request as follows: /// -/// `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; +/// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// -/// `x-amz-copy-source-if-modified-since` condition evaluates to `true`; +/// * **Directory bucket permissions** \- You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an `UploadPartCopy` operation. /// -/// Amazon S3 returns `412 Precondition Failed` response code. +/// * If the source object that you want to copy is in a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to read the object. By default, the session is in the `ReadWrite` mode. If you want to restrict the access, you can explicitly set the `s3express:SessionMode` condition key to `ReadOnly` on the copy source bucket. /// +/// * If the copy destination is a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to write the object to the destination. The `s3express:SessionMode` condition key cannot be set to `ReadOnly` on the copy destination. /// -/// Versioning /// -/// If your bucket has versioning enabled, you could have multiple versions of the same object. By default, `x-amz-copy-source` identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the `x-amz-copy-source`, Amazon S3 returns a 404 error, because the object does not exist.
If you specify versionId in the `x-amz-copy-source` and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the `x-amz-copy-source`. +/// For example policies, see [Example bucket policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) in the _Amazon S3 User Guide_. /// -/// You can optionally specify a specific version of the source object to copy by adding the `versionId` subresource as shown in the following example: /// -/// `x-amz-copy-source: /bucket/object?versionId=version id` +/// Encryption +/// +/// * **General purpose buckets** \- For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). +/// +/// * **Directory buckets** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. +/// /// /// Special errors /// -/// * * _Code: NoSuchUpload_ +/// * Error Code: `NoSuchUpload` +/// +/// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. +/// +/// * HTTP Status Code: 404 Not Found /// -/// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ +/// * Error Code: `InvalidRequest` /// -/// * _HTTP Status Code: 404 Not Found_ +/// * Description: The specified copy source is not supported as a byte-range copy source. /// -/// * * _Code: InvalidRequest_ +/// * HTTP Status Code: 400 Bad Request /// -/// * _Cause: The specified copy source is not supported as a byte-range copy source._ /// -/// * _HTTP Status Code: 400 Bad Request_ +/// HTTP Host header syntax /// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPartCopy`: /// @@ -94,66 +102,74 @@ class UploadPartCopyOperation extends _i1.HttpOperation< UploadPartCopyRequest, CopyPartResult, UploadPartCopyOutput> { - /// Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header `x-amz-copy-source` in your request and a byte range by adding the request header `x-amz-copy-source-range` in your request. + /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header `x-amz-copy-source` in your request. To specify a byte range, you add the request header `x-amz-copy-source-range` in your request. /// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. 
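To make the ranged copying concrete, here is a self-contained sketch (plain Dart, no SDK calls) that builds the `x-amz-copy-source` value and a sequence of `x-amz-copy-source-range` values for a source object copied in fixed-size parts; the helper names and the 100 MiB part size are illustrative only, and real part sizes must respect the limits linked above.

```dart
// Illustrative helpers only (not part of the regenerated SDK).
String copySource(String bucket, String key) => '/$bucket/$key';

Iterable<String> copySourceRanges(int objectSizeBytes, int partSizeBytes) sync* {
  for (var start = 0; start < objectSizeBytes; start += partSizeBytes) {
    final end = start + partSizeBytes - 1 < objectSizeBytes
        ? start + partSizeBytes - 1
        : objectSizeBytes - 1;
    yield 'bytes=$start-$end'; // inclusive byte range, one per copied part
  }
}

void main() {
  print(copySource('source-bucket', 'large-object.bin')); // /source-bucket/large-object.bin
  // A 1 GiB source copied in 100 MiB parts needs 11 ranged copies.
  final ranges =
      copySourceRanges(1024 * 1024 * 1024, 100 * 1024 * 1024).toList();
  print(ranges.length); // 11
  print(ranges.first); // bytes=0-104857599
  print(ranges.last); // bytes=1048576000-1073741823
}
```

Each such range would be sent as one `UploadPartCopy` request, together with the part number and the upload ID returned by `CreateMultipartUpload`.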
/// - /// Instead of using an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and provide data in your request. + /// Instead of copying data from an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action to upload new data as a part of an object in your request. /// - /// You must initiate a multipart upload before you can upload any part. In response to your initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request. + /// You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. /// - /// For more information about using the `UploadPartCopy` operation, see the following: + /// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. /// - /// * For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// * For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// Authentication and authorization /// - /// * For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. + /// All `UploadPartCopy` requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). /// - /// * For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). + /// **Directory buckets** \- You must use IAM credentials to authenticate and authorize your access to the `UploadPartCopy` API operation, instead of using the temporary security credentials through the `CreateSession` API operation. 
/// + /// Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. /// - /// Note the following additional considerations about the request headers `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, and `x-amz-copy-source-if-modified-since`: + /// Permissions /// - /// * **Consideration 1** \- If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request as follows: + /// You must have `READ` access to the source object and `WRITE` access to the destination bucket. /// - /// `x-amz-copy-source-if-match` condition evaluates to `true`, and; + /// * **General purpose bucket permissions** \- You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an `UploadPartCopy` operation. /// - /// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`; + /// * If the source object is in a general purpose bucket, you must have the **`s3:GetObject`** permission to read the source object that is being copied. /// - /// Amazon S3 returns `200 OK` and copies the data. + /// * If the destination bucket is a general purpose bucket, you must have the **`s3:PutObject`** permission to write the object copy to the destination bucket. /// - /// * **Consideration 2** \- If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request as follows: /// - /// `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; + /// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// `x-amz-copy-source-if-modified-since` condition evaluates to `true`; + /// * **Directory bucket permissions** \- You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an `UploadPartCopy` operation. /// - /// Amazon S3 returns `412 Precondition Failed` response code. + /// * If the source object that you want to copy is in a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to read the object. By default, the session is in the `ReadWrite` mode. If you want to restrict the access, you can explicitly set the `s3express:SessionMode` condition key to `ReadOnly` on the copy source bucket. /// + /// * If the copy destination is a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to write the object to the destination. The `s3express:SessionMode` condition key cannot be set to `ReadOnly` on the copy destination. /// - /// Versioning /// - /// If your bucket has versioning enabled, you could have multiple versions of the same object. By default, `x-amz-copy-source` identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the `x-amz-copy-source`, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the `x-amz-copy-source` and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the `x-amz-copy-source`.
+ /// For example policies, see [Example bucket policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) in the _Amazon S3 User Guide_. /// - /// You can optionally specify a specific version of the source object to copy by adding the `versionId` subresource as shown in the following example: /// - /// `x-amz-copy-source: /bucket/object?versionId=version id` + /// Encryption + /// + /// * **General purpose buckets** \- For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). + /// + /// * **Directory buckets** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. + /// /// /// Special errors /// - /// * * _Code: NoSuchUpload_ + /// * Error Code: `NoSuchUpload` + /// + /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// - /// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ + /// * HTTP Status Code: 404 Not Found /// - /// * _HTTP Status Code: 404 Not Found_ + /// * Error Code: `InvalidRequest` /// - /// * * _Code: InvalidRequest_ + /// * Description: The specified copy source is not supported as a byte-range copy source. /// - /// * _Cause: The specified copy source is not supported as a byte-range copy source._ + /// * HTTP Status Code: 400 Bad Request /// - /// * _HTTP Status Code: 400 Bad Request_ /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPartCopy`: /// @@ -332,8 +348,10 @@ class UploadPartCopyOperation extends _i1.HttpOperation< input.uploadId, ); }); + @override int successCode([UploadPartCopyOutput? output]) => 200; + @override UploadPartCopyOutput buildOutput( CopyPartResult? payload, @@ -343,12 +361,16 @@ class UploadPartCopyOperation extends _i1.HttpOperation< payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'UploadPartCopy'; + @override _i2.AWSRetryer get retryer => _i2.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? 
endpoint.uri; @@ -369,6 +391,7 @@ class UploadPartCopyOperation extends _i1.HttpOperation< @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( UploadPartCopyRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_operation.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_operation.dart index f7242522a4..aef883fd10 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_operation.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/operation/upload_part_operation.dart @@ -16,47 +16,66 @@ import 'package:smithy_aws/smithy_aws.dart' as _i3; /// Uploads a part in a multipart upload. /// -/// In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. +/// In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. /// -/// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request. +/// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. /// /// Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. /// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. /// -/// To ensure that data is not corrupted when traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. +/// After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. /// -/// If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. 
For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). +/// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . /// -/// **Note:** After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. +/// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// -/// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . +/// Permissions +/// +/// * **General purpose bucket permissions** \- For information on the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// +/// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . +/// +/// +/// Data integrity +/// +/// **General purpose bucket** \- To ensure that data is not corrupted traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). +/// +/// **Directory buckets** \- MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. 
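A minimal sketch of the `Content-MD5` guidance above, using `package:crypto` and `dart:convert`: the header value is the base64 encoding of the MD5 digest of the exact part bytes. Wiring the value into the generated `UploadPartRequest` is omitted because that field surface is not shown in this diff, so the helper name here is illustrative only.

```dart
import 'dart:convert';
import 'dart:typed_data';

import 'package:crypto/crypto.dart';

// Content-MD5 is the base64-encoded 128-bit MD5 digest of the part body.
String contentMd5For(Uint8List partBody) =>
    base64.encode(md5.convert(partBody).bytes);

void main() {
  final body = Uint8List.fromList(utf8.encode('example part payload'));
  // S3 recomputes the digest server-side and rejects the part on a mismatch.
  print(contentMd5For(body));
}
```

For directory buckets, where MD5 is not supported, the same idea applies with the checksum algorithms mentioned above instead of `Content-MD5`.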
/// -/// For information on the permissions required to use the multipart upload API, go to [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. +/// Encryption /// -/// Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). For more information, go to [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. +/// * **General purpose bucket** \- Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). /// -/// Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +/// Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). /// -/// If you requested server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers. 
+/// If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. /// -/// * x-amz-server-side-encryption-customer-algorithm +/// * x-amz-server-side-encryption-customer-algorithm /// -/// * x-amz-server-side-encryption-customer-key +/// * x-amz-server-side-encryption-customer-key /// -/// * x-amz-server-side-encryption-customer-key-MD5 +/// * x-amz-server-side-encryption-customer-key-MD5 /// +/// * **Directory bucket** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// -/// `UploadPart` has the following special errors: /// -/// * * _Code: NoSuchUpload_ +/// For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. /// -/// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ +/// Special errors /// -/// * _HTTP Status Code: 404 Not Found_ +/// * Error Code: `NoSuchUpload` /// -/// * _SOAP Fault Code Prefix: Client_ +/// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// +/// * HTTP Status Code: 404 Not Found +/// +/// * SOAP Fault Code Prefix: Client +/// +/// +/// HTTP Host header syntax +/// +/// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPart`: /// @@ -73,47 +92,66 @@ class UploadPartOperation extends _i1.HttpOperation<_i2.Stream>, UploadPartRequest, UploadPartOutputPayload, UploadPartOutput> { /// Uploads a part in a multipart upload. /// - /// In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. + /// In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. /// - /// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request. + /// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. /// /// Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. 
If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. /// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. /// - /// To ensure that data is not corrupted when traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. + /// After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. /// - /// If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). + /// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . /// - /// **Note:** After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information on the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. 
Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// For information on the permissions required to use the multipart upload API, go to [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). For more information, go to [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. + /// Data integrity /// - /// Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). + /// **General purpose bucket** \- To ensure that data is not corrupted traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). /// - /// If you requested server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers. + /// **Directory buckets** \- MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. /// - /// * x-amz-server-side-encryption-customer-algorithm + /// Encryption /// - /// * x-amz-server-side-encryption-customer-key + /// * **General purpose bucket** \- Server-side encryption is for data encryption at rest. 
Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). /// - /// * x-amz-server-side-encryption-customer-key-MD5 + /// Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). /// + /// If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. /// - /// `UploadPart` has the following special errors: + /// * x-amz-server-side-encryption-customer-algorithm /// - /// * * _Code: NoSuchUpload_ + /// * x-amz-server-side-encryption-customer-key /// - /// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ + /// * x-amz-server-side-encryption-customer-key-MD5 /// - /// * _HTTP Status Code: 404 Not Found_ + /// * **Directory bucket** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// - /// * _SOAP Fault Code Prefix: Client_ /// + /// For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. + /// + /// Special errors + /// + /// * Error Code: `NoSuchUpload` + /// + /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. + /// + /// * HTTP Status Code: 404 Not Found + /// + /// * SOAP Fault Code Prefix: Client + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPart`: /// @@ -266,8 +304,10 @@ class UploadPartOperation extends _i1.HttpOperation<_i2.Stream>, .add(_i3.WithChecksum(input.checksumAlgorithm!.value)); } }); + @override int successCode([UploadPartOutput? output]) => 200; + @override UploadPartOutput buildOutput( UploadPartOutputPayload payload, @@ -277,12 +317,16 @@ class UploadPartOperation extends _i1.HttpOperation<_i2.Stream>, payload, response, ); + @override List<_i1.SmithyError> get errorTypes => const []; + @override String get runtimeTypeName => 'UploadPart'; + @override _i3.AWSRetryer get retryer => _i3.AWSRetryer(); + @override Uri get baseUri { var baseUri = _baseUri ?? 
endpoint.uri; @@ -303,6 +347,7 @@ class UploadPartOperation extends _i1.HttpOperation<_i2.Stream>, @override _i1.Endpoint get endpoint => _awsEndpoint.endpoint; + @override _i1.SmithyOperation run( UploadPartRequest input, { diff --git a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/s3_client.dart b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/s3_client.dart index ebffd87dd5..bc68db59d0 100644 --- a/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/s3_client.dart +++ b/packages/storage/amplify_storage_s3_dart/lib/src/sdk/src/s3/s3_client.dart @@ -86,11 +86,22 @@ class S3Client { final List<_i4.HttpResponseInterceptor> _responseInterceptors; - /// This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. + /// This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts. /// - /// To verify that all parts have been removed, so you don't get charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and ensure that the parts list is empty. + /// To verify that all parts have been removed and prevent getting charged for the part storage, you should call the [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) API operation and ensure that the parts list is empty. /// - /// For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. 
Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `AbortMultipartUpload`: /// @@ -124,44 +135,55 @@ class S3Client { /// Completes a multipart upload by assembling previously uploaded parts. /// - /// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the `ETag` value, returned after that part was uploaded. + /// You first initiate the multipart upload and then upload all parts using the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation or the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. After successfully uploading all relevant parts of an upload, you call this `CompleteMultipartUpload` operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the `PartNumber` value and the `ETag` value that are returned after that part was uploaded. /// - /// Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a `200 OK` response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error). 
+ /// The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a `200 OK` response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial `200 OK` response has been sent. This means that a `200 OK` response can contain either a success or an error. The error response might be embedded in the `200 OK` response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). /// /// Note that if `CompleteMultipartUpload` fails, applications should be prepared to retry the failed requests. For more information, see [Amazon S3 Error Best Practices](https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). /// - /// You cannot use `Content-Type: application/x-www-form-urlencoded` with Complete Multipart Upload requests. Also, if you do not provide a `Content-Type` header, `CompleteMultipartUpload` returns a 200 OK response. + /// You can't use `Content-Type: application/x-www-form-urlencoded` for the CompleteMultipartUpload requests. Also, if you don't provide a `Content-Type` header, `CompleteMultipartUpload` can still return a `200 OK` response. + /// + /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. 
After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). /// - /// `CompleteMultipartUpload` has the following special errors: + /// Special errors /// - /// * Error code: `EntityTooSmall` + /// * Error Code: `EntityTooSmall` /// /// * Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `InvalidPart` + /// * Error Code: `InvalidPart` /// - /// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag. + /// * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `InvalidPartOrder` + /// * Error Code: `InvalidPartOrder` /// /// * Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. /// - /// * 400 Bad Request + /// * HTTP Status Code: 400 Bad Request /// - /// * Error code: `NoSuchUpload` + /// * Error Code: `NoSuchUpload` /// /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// - /// * 404 Not Found + /// * HTTP Status Code: 404 Not Found + /// + /// + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CompleteMultipartUpload`: /// @@ -197,90 +219,64 @@ class S3Client { /// /// You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). /// - /// All copy requests must be authenticated. Additionally, you must have _read_ access to the source object and _write_ access to the destination bucket. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. - /// - /// A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. 
If the error occurs during the copy operation, the error response is embedded in the `200 OK` response. This means that a `200 OK` response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error). - /// - /// If the copy is successful, you receive a response with information about the copied object. - /// - /// If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body. - /// - /// The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/). - /// - /// Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 `Bad Request` error. For more information, see [Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). - /// - /// Metadata - /// - /// When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). - /// - /// To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the `x-amz-metadata-directive` header. When you grant permissions, you can use the `s3:x-amz-metadata-directive` condition key to enforce certain metadata behavior when objects are uploaded. For more information, see [Specifying Conditions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in the _Amazon S3 User Guide_. For a complete list of Amazon S3-specific condition keys, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). + /// You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. /// - /// `x-amz-website-redirect-location` is unique to each object and must be specified in the request headers to copy the value. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. 
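Given the endpoint and authorization notes above, a single-object copy with the generated client might look like the sketch below; the `copyObject` method, the `CopyObjectRequest` constructor shape, the `copyObjectResult` field on the output, the `.result` future, and the import path are assumptions about this generated SDK, and the bucket and key names are placeholders.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed barrel import for the generated S3 client and models

Future<void> copyExample(S3Client s3) async {
  final output = await s3
      .copyObject(CopyObjectRequest(
        bucket: 'amzn-s3-demo-destination-bucket', // placeholder
        key: 'copied-object.bin', // placeholder
        // "<source-bucket>/<source-key>"; append "?versionId=<id>" to copy a
        // specific version instead of the current one.
        copySource: 'amzn-s3-demo-source-bucket/original-object.bin',
      ))
      .result;

  // Rely on the parsed result (for example, the new object's ETag) rather than
  // the HTTP status alone.
  print(output.copyObjectResult?.eTag);
}
```

Checking the parsed result matters because, as described below, a `200 OK` response to `CopyObject` can still carry an embedded error.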
/// - /// x-amz-copy-source-if Headers + /// Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. /// - /// To only copy an object under certain conditions, such as whether the `Etag` matches or whether the object was modified before or after a specified date, use the following request parameters: + /// Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a `400 Bad Request` error. For more information, see [Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). /// - /// * `x-amz-copy-source-if-match` + /// Authentication and authorization /// - /// * `x-amz-copy-source-if-none-match` + /// All `CopyObject` requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). /// - /// * `x-amz-copy-source-if-unmodified-since` + /// **Directory buckets** \- You must use the IAM credentials to authenticate and authorize your access to the `CopyObject` API operation, instead of using the temporary security credentials through the `CreateSession` API operation. /// - /// * `x-amz-copy-source-if-modified-since` + /// Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf. /// + /// Permissions /// - /// If both the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request and evaluate as follows, Amazon S3 returns `200 OK` and copies the data: - /// - /// * `x-amz-copy-source-if-match` condition evaluates to true - /// - /// * `x-amz-copy-source-if-unmodified-since` condition evaluates to false - /// - /// - /// If both the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request and evaluate as follows, Amazon S3 returns the `412 Precondition Failed` response code: - /// - /// * `x-amz-copy-source-if-none-match` condition evaluates to false + /// You must have _read_ access to the source object and _write_ access to the destination bucket. /// - /// * `x-amz-copy-source-if-modified-since` condition evaluates to true + /// * **General purpose bucket permissions** \- You must have permissions in an IAM policy based on the source and destination bucket types in a `CopyObject` operation. /// + /// * If the source object is in a general purpose bucket, you must have **`s3:GetObject`** permission to read the source object that is being copied. /// - /// All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. + /// * If the destination bucket is a general purpose bucket, you must have **`s3:PutObject`** permission to write the object copy to the destination bucket. /// - /// Server-side encryption + /// * **Directory bucket permissions** \- You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a `CopyObject` operation. /// - /// Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. 
When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. + /// * If the source object that you want to copy is in a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to read the object. By default, the session is in the `ReadWrite` mode. If you want to restrict the access, you can explicitly set the `s3express:SessionMode` condition key to `ReadOnly` on the copy source bucket. /// - /// When you perform a `CopyObject` operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). + /// * If the copy destination is a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to write the object to the destination. The `s3express:SessionMode` condition key can't be set to `ReadOnly` on the copy destination bucket. /// - /// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the _Amazon S3 User Guide_. /// - /// Access Control List (ACL)-Specific Request Headers + /// For example policies, see [Example bucket policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) in the _Amazon S3 User Guide_. /// - /// When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. 
For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). /// - /// If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept `PUT` requests that don't specify an ACL or `PUT` requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. + /// Response and special errors /// - /// For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. + /// When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the `Content-Length`. You always need to read the entire response body to check if the copy succeeds and to keep the connection alive while Amazon S3 copies the data. /// - /// If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. + /// * If the copy is successful, you receive a response with information about the copied object. /// - /// Checksums + /// * A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A `200 OK` response can contain either a success or an error. /// - /// When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the `x-amz-checksum-algorithm` header. + /// * If the error occurs before the copy action starts, you receive a standard Amazon S3 error. /// - /// Storage Class Options + /// * If the error occurs during the copy operation, the error response is embedded in the `200 OK` response. For example, in a cross-Region copy, you may encounter throttling and receive a `200 OK` response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3](https://repost.aws/knowledge-center/s3-resolve-200-internalerror). The `200 OK` status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is that, if you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a `200 OK` response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. /// - /// You can use the `CopyObject` action to change the storage class of an object that is already stored in Amazon S3 by using the `StorageClass` parameter. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). 
If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). /// - /// If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). For more information, see [Copying Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). /// - /// Versioning + /// Charge /// - /// By default, `x-amz-copy-source` header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the `versionId` subresource. + /// The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/). /// - /// If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the `x-amz-version-id` response header in the response. + /// HTTP Host header syntax /// - /// If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null. + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CopyObject`: /// @@ -306,121 +302,71 @@ class S3Client { ); } - /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also include this upload ID in the final request to either complete or abort the multipart upload request. - /// - /// For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). - /// - /// If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - /// - /// For information about the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). - /// - /// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. 
For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). - /// - /// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload. - /// - /// Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the request to initiate the upload by using `CreateMultipartUpload`. You can request that Amazon S3 save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C). - /// - /// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. - /// - /// If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role. 
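To make the KMS permission requirement above concrete, the sketch below initiates a multipart upload that requests SSE-KMS up front, so the individual `UploadPart` calls don't need to repeat the encryption parameters. The `serverSideEncryption` and `ssekmsKeyId` field names, the `ServerSideEncryption.awsKms` value, the `.result` future, and the import path are assumptions about the generated model, and the bucket, key, and KMS key ARN are placeholders.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart'; // assumed barrel import for the generated S3 client and models

Future<String> startKmsEncryptedUpload(S3Client s3) async {
  final created = await s3
      .createMultipartUpload(CreateMultipartUploadRequest(
        bucket: 'amzn-s3-demo-bucket', // placeholder
        key: 'large-object.bin', // placeholder
        // Requesting SSE-KMS here requires kms:Decrypt and kms:GenerateDataKey*
        // on the key, because Amazon S3 must decrypt and read the encrypted
        // parts before it can complete the multipart upload.
        serverSideEncryption: ServerSideEncryption.awsKms, // assumed enum value
        ssekmsKeyId:
            'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID', // placeholder ARN
      ))
      .result;
  return created.uploadId!;
}
```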
- /// - /// For more information, see [Protecting Data Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). - /// - /// Access Permissions - /// - /// When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers: - /// - /// * Specify a canned ACL with the `x-amz-acl` request header. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - /// - /// * Specify access permissions explicitly with the `x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). - /// - /// - /// You can use either a canned ACL or specify access permissions explicitly. You cannot do both. - /// - /// Server-Side- Encryption-Specific Request Headers + /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_. /// - /// Amazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can request that Amazon S3 encrypts data at rest by using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys (SSE-C). + /// After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload. /// - /// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. + /// If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). 
/// - /// * `x-amz-server-side-encryption` + /// * **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. /// - /// * `x-amz-server-side-encryption-aws-kms-key-id` + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// * `x-amz-server-side-encryption-context` /// + /// Request signing /// - /// If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. + /// For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the _Amazon S3 User Guide_. /// - /// All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. - /// - /// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). - /// - /// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. - /// - /// * `x-amz-server-side-encryption-customer-algorithm` - /// - /// * `x-amz-server-side-encryption-customer-key` - /// - /// * `x-amz-server-side-encryption-customer-key-MD5` - /// - /// - /// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). - /// - /// - /// Access-Control-List (ACL)-Specific Request Headers - /// - /// You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see [Using ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With this operation, you can grant access permissions using one of the following two methods: + /// Permissions /// - /// * Specify a canned ACL (`x-amz-acl`) — Amazon S3 supports a set of predefined ACLs, known as _canned ACLs_. Each canned ACL has a predefined set of grantees and permissions. 
For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + /// * **General purpose bucket permissions** \- For information about the permissions required to use the multipart upload API, see [Multipart upload and permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// * Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use: + /// To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// - /// * `x-amz-grant-read` + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// * `x-amz-grant-write` /// - /// * `x-amz-grant-read-acp` + /// Encryption /// - /// * `x-amz-grant-write-acp` + /// * **General purpose buckets** \- Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). 
If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) requests must match the headers you used in the `CreateMultipartUpload` request. /// - /// * `x-amz-grant-full-control` + /// * Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (`aws/s3`) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. /// + /// * `x-amz-server-side-encryption` /// - /// You specify each grantee as a type=value pair, where the type is one of the following: + /// * `x-amz-server-side-encryption-aws-kms-key-id` /// - /// * `id` – if the value specified is the canonical user ID of an Amazon Web Services account + /// * `x-amz-server-side-encryption-context` /// - /// * `uri` – if you are granting permissions to a predefined group /// - /// * `emailAddress` – if the value specified is the email address of an Amazon Web Services account + /// * If you specify `x-amz-server-side-encryption:aws:kms`, but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the Amazon Web Services managed key (`aws/s3` key) in KMS to protect the data. /// - /// Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: + /// * To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see [Multipart upload API and permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) and [Protecting data using server-side encryption with Amazon Web Services KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// - /// * US East (N. Virginia) + /// * If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. /// - /// * US West (N. 
California) + /// * All `GET` and `PUT` requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) in the _Amazon S3 User Guide_. /// - /// * US West (Oregon) /// - /// * Asia Pacific (Singapore) + /// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) in the _Amazon S3 User Guide_. /// - /// * Asia Pacific (Sydney) + /// * Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. /// - /// * Asia Pacific (Tokyo) + /// * `x-amz-server-side-encryption-customer-algorithm` /// - /// * Europe (Ireland) + /// * `x-amz-server-side-encryption-customer-key` /// - /// * South America (São Paulo) + /// * `x-amz-server-side-encryption-customer-key-MD5` /// /// - /// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the Amazon Web Services General Reference. + /// For more information about server-side encryption with customer-provided encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. /// + /// * **Directory buckets** -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// - /// For example, the following `x-amz-grant-read` header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: /// - /// `x-amz-grant-read: id="11112222333", id="444455556666"` + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `CreateMultipartUpload`: /// @@ -452,15 +398,42 @@ class S3Client { ); } - /// Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. + /// Removes an object from a bucket. The behavior depends on the bucket's versioning state: + /// + /// * If versioning is enabled, the operation removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful. + /// + /// * If versioning is suspended or not enabled, the operation permanently deletes the object. + /// + /// + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. 
You can only specify `null` to the `versionId` query parameter in the request. + /// + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// + /// To remove a specific version, you must use the `versionId` query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header `x-amz-delete-marker` to true. + /// + /// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. Requests that include `x-amz-mfa` must use HTTPS. For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) in the _Amazon S3 User Guide_. To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). + /// + /// **Directory buckets** \- MFA delete is not supported by directory buckets. + /// + /// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. + /// + /// **Directory buckets** \- S3 Lifecycle is not supported by directory buckets. + /// + /// Permissions /// - /// To remove a specific version, you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, `x-amz-delete-marker`, to true. + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. /// - /// If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the `x-amz-mfa` request header in the DELETE `versionId` request. Requests that include `x-amz-mfa` must use HTTPS. + /// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always have the `s3:DeleteObject` permission. /// - /// For more information about MFA Delete, see [Using MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see sample requests that use versioning, see [Sample Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). + /// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versioning-enabled bucket, you must have the `s3:DeleteObjectVersion` permission. /// - /// You can delete objects by explicitly calling DELETE Object or configure its lifecycle ([PutBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) to enable Amazon S3 to remove them for you.
If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the `s3:DeleteObject`, `s3:DeleteObjectVersion`, and `s3:PutLifeCycleConfiguration` actions. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following action is related to `DeleteObject`: /// @@ -484,15 +457,42 @@ class S3Client { ); } - /// This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead. + /// This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead. + /// + /// The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. + /// + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. + /// + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// + /// The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body. 
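To make the verbose/quiet distinction concrete, here is a minimal Dart sketch of a quiet-mode Multi-Object Delete against the regenerated client. The import path, the factory signatures of `DeleteObjectsRequest`, `Delete`, and `ObjectIdentifier`, and the `.result` accessor are assumptions rather than part of this patch; the bucket and key names are placeholders.

```dart
// Minimal sketch (not part of this patch): a quiet-mode Multi-Object Delete.
// Import path, factory signatures, and `.result` are assumptions about the
// regenerated client; bucket and key names are placeholders.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> deleteQuietly(S3Client s3) async {
  final request = DeleteObjectsRequest(
    bucket: 'examplebucket',
    delete: Delete(
      quiet: true, // verbose mode (the default) would report every deleted key
      objects: [
        ObjectIdentifier(key: 'photos/2006/February/sample.jpg'),
        ObjectIdentifier(key: 'photos/2006/March/sample.jpg'),
      ],
    ),
  );
  final output = await s3.deleteObjects(request).result;
  // In quiet mode, only keys whose deletion failed come back.
  final errors = output.errors;
  if (errors != null) {
    for (final error in errors) {
      print('Failed to delete ${error.key}: ${error.code}');
    }
  }
}
```

With `quiet: true`, an empty `errors` list means every key in the request was deleted or treated as deleted.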
+ /// + /// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- MFA delete is not supported by directory buckets. + /// + /// Permissions /// - /// The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted. + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `DeleteObjects` request includes specific headers. /// - /// The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body. + /// * **`s3:DeleteObject`** \- To delete an object from a bucket, you must always specify the `s3:DeleteObject` permission. /// - /// When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see [MFA Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). + /// * **`s3:DeleteObjectVersion`** \- To delete a specific version of an object from a versioning-enabled bucket, you must specify the `s3:DeleteObjectVersion` permission. /// - /// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use.
Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Content-MD5 request header + /// + /// * **General purpose bucket** \- The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit. + /// + /// * **Directory bucket** \- The Content-MD5 request header or an additional checksum request header (including `x-amz-checksum-crc32`, `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or `x-amz-checksum-sha256`) is required for all Multi-Object Delete requests. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `DeleteObjects`: /// @@ -524,78 +524,69 @@ class S3Client { ); } - /// Retrieves objects from Amazon S3. To use `GET`, you must have `READ` access to the object. If you grant `READ` access to the anonymous user, you can return the object without using an authorization header. + /// Retrieves an object from Amazon S3. /// - /// An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object `sample.jpg`, you can name it `photos/2006/February/sample.jpg`. + /// In the `GetObject` request, specify the full key name for the object. /// - /// To get an object from such a logical hierarchy, specify the full key name for the object in the `GET` operation. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the resource as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the resource as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). + /// **General purpose buckets** \- Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg`, specify the object key name as `/photos/2006/February/sample.jpg`. For a path-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket`, specify the object key name as `/examplebucket/photos/2006/February/sample.jpg`. For more information about request types, see [HTTP Host Header Bucket Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) in the _Amazon S3 User Guide_. /// - /// For more information about returning the ACL of an object, see [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). + /// **Directory buckets** \- Only virtual-hosted-style requests are supported.
For a virtual hosted-style request example, if you have the object `photos/2006/February/sample.jpg` in the bucket named `examplebucket--use1-az5--x-s3`, specify the object key name as `/photos/2006/February/sample.jpg`. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this action returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). + /// Permissions /// - /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. + /// * **General purpose bucket permissions** \- You must have the required permissions in a policy. To use `GetObject`, you must have the `READ` access to the object (or version). If you grant `READ` access to the anonymous user, the `GetObject` operation returns the object without using an authorization header. For more information, see [Specifying permissions in a policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// - /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: + /// If you include a `versionId` in your request header, you must have the `s3:GetObjectVersion` permission to access a specific version of an object. The `s3:GetObject` permission is not required in this scenario. /// - /// * `x-amz-server-side-encryption-customer-algorithm` + /// If you request the current version of an object without a specific `versionId` in the request header, only the `s3:GetObject` permission is required. The `s3:GetObjectVersion` permission is not required in this scenario. /// - /// * `x-amz-server-side-encryption-customer-key` + /// If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// - /// * `x-amz-server-side-encryption-customer-key-MD5` + /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// + /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Access Denied` error. 
/// - /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// Assuming you have the relevant permission to read object tags, the response also returns the `x-amz-tagging-count` header that provides the count of number of tags associated with the object. You can use [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to retrieve the tag set associated with an object. /// - /// Permissions + /// Storage classes /// - /// You need the relevant read object (or version) permission for this operation. For more information, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. + /// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using [RestoreObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). Otherwise, this operation returns an `InvalidObjectState` error. For information about restoring archived objects, see [Restoring Archived Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the _Amazon S3 User Guide_. /// - /// If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. + /// **Directory buckets** \- For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code `400 Bad Request`. /// - /// If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. 
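As a rough illustration of the `versionId` and permission notes above, the following sketch fetches a specific version of an object through the regenerated client; beyond the `getObject` method name documented in this patch, the import path, field names, and `.result` accessor are assumptions, and the bucket, key, and version ID are placeholders.

```dart
// Illustrative sketch only: request a specific object version. With a
// `versionId`, `s3:GetObjectVersion` is the permission that applies (see the
// notes above). Import path, field names, and `.result` are assumptions;
// bucket, key, and version ID are placeholders.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> getSpecificVersion(S3Client s3) async {
  final output = await s3
      .getObject(
        GetObjectRequest(
          bucket: 'examplebucket',
          key: 'photos/2006/February/sample.jpg',
          versionId: 'example-version-id', // omit to get the current version
        ),
      )
      .result;
  print('ETag: ${output.eTag}, Content-Length: ${output.contentLength}');
}
```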
+ /// Encryption /// - /// Versioning + /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for the `GetObject` requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your `GetObject` requests for the object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. /// - /// By default, the `GET` action returns the current version of an object. To return a different version, use the `versionId` subresource. + /// Overriding response header values through the request /// - /// * If you supply a `versionId`, you need the `s3:GetObjectVersion` permission to access a specific version of an object. If you request a specific version, you do not need to have the `s3:GetObject` permission. If you request the current version without a specific version ID, only `s3:GetObject` permission is required. `s3:GetObjectVersion` permission won't be required. + /// There are times when you want to override certain response header values of a `GetObject` response. For example, you might override the `Content-Disposition` response header value through your `GetObject` request. /// - /// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. + /// You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code `200 OK` is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. /// + /// The response headers that you can override for the `GetObject` response are `Cache-Control`, `Content-Disposition`, `Content-Encoding`, `Content-Language`, `Content-Type`, and `Expires`. /// - /// For more information about versioning, see [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). + /// To override values for a set of response headers in the `GetObject` response, you can use the following query parameters in the request. /// - /// Overriding Response Header Values + /// * `response-cache-control` /// - /// There are times when you want to override certain response header values in a `GET` response. For example, you might override the `Content-Disposition` response header value in your `GET` request. + /// * `response-content-disposition` /// - /// You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the `GET` response are `Content-Type`, `Content-Language`, `Expires`, `Cache-Control`, `Content-Disposition`, and `Content-Encoding`. To override these header values in the `GET` response, you use the following request parameters. + /// * `response-content-encoding` /// - /// You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request. 
+ /// * `response-content-language` /// /// * `response-content-type` /// - /// * `response-content-language` - /// /// * `response-expires` /// - /// * `response-cache-control` - /// - /// * `response-content-disposition` - /// - /// * `response-content-encoding` - /// - /// - /// Overriding Response Header Values /// - /// If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: `If-Match` condition evaluates to `true`, and; `If-Unmodified-Since` condition evaluates to `false`; then, S3 returns 200 OK and the data requested. + /// When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. /// - /// If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: `If-None-Match` condition evaluates to `false`, and; `If-Modified-Since` condition evaluates to `true`; then, S3 returns 304 Not Modified response code. + /// HTTP Host header syntax /// - /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `GetObject`: /// @@ -621,59 +612,59 @@ class S3Client { ); } - /// The `HEAD` action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use `HEAD`, you must have READ access to the object. + /// The `HEAD` operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. /// - /// A `HEAD` request has the same options as a `GET` action on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic `400 Bad Request`, `403 Forbidden` or `404 Not Found` code. It is not possible to retrieve the exact exception beyond these error codes. + /// A `HEAD` request has the same options as a `GET` operation on an object. The response is identical to the `GET` response except that there is no response body. Because of this, if the `HEAD` request generates an error, it returns a generic code, such as `400 Bad Request`, `403 Forbidden`, `404 Not Found`, `405 Method Not Allowed`, `412 Precondition Failed`, or `304 Not Modified`. It's not possible to retrieve the exact exception of these error codes. /// - /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers: - /// - /// * `x-amz-server-side-encryption-customer-algorithm` + /// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// - /// * `x-amz-server-side-encryption-customer-key` + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. 
For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// * `x-amz-server-side-encryption-customer-key-MD5` + /// Permissions /// + /// * **General purpose bucket permissions** \- To use `HEAD`, you must have the `s3:GetObject` permission. You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) in the _Amazon S3 User Guide_. /// - /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + /// If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the `s3:ListBucket` permission. /// - /// * Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `GET` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error. + /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code `404 Not Found` error. /// - /// * The last modified property in this case is the creation date of the object. + /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code `403 Forbidden` error. /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). /// - /// Consider the following when using request headers: + /// Encryption /// - /// * Consideration 1 – If both of the `If-Match` and `If-Unmodified-Since` headers are present in the request as follows: + /// Encryption request headers, like `x-amz-server-side-encryption`, should not be sent for `HEAD` requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). 
The `x-amz-server-side-encryption` header is used when you `PUT` an object to S3 and want to specify the encryption method. If you include this header in a `HEAD` request for an object that uses these types of keys, you’ll get an HTTP `400 Bad Request` error. It's because the encryption method can't be changed when you retrieve the object. /// - /// * `If-Match` condition evaluates to `true`, and; + /// If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: /// - /// * `If-Unmodified-Since` condition evaluates to `false`; + /// * `x-amz-server-side-encryption-customer-algorithm` /// + /// * `x-amz-server-side-encryption-customer-key` /// - /// Then Amazon S3 returns `200 OK` and the data requested. + /// * `x-amz-server-side-encryption-customer-key-MD5` /// - /// * Consideration 2 – If both of the `If-None-Match` and `If-Modified-Since` headers are present in the request as follows: /// - /// * `If-None-Match` condition evaluates to `false`, and; + /// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) in the _Amazon S3 User Guide_. /// - /// * `If-Modified-Since` condition evaluates to `true`; + /// **Directory bucket permissions** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// + /// Versioning /// - /// Then Amazon S3 returns the `304 Not Modified` response code. + /// * If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes `x-amz-delete-marker: true` in the response. /// + /// * If the specified version is a delete marker, the response returns a `405 Method Not Allowed` error and the `Last-Modified: timestamp` response header. /// - /// For more information about conditional requests, see [RFC 7232](https://tools.ietf.org/html/rfc7232). /// - /// Permissions + /// * **Directory buckets** \- Delete marker is not supported by directory buckets. /// - /// You need the relevant read object (or version) permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + /// * **Directory buckets** \- S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the `null` value of the version ID is supported by directory buckets. You can only specify `null` to the `versionId` query parameter in the request. /// - /// * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 returns an HTTP status code 404 error. /// - /// * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns an HTTP status code 403 error. + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following actions are related to `HeadObject`: /// @@ -699,15 +690,39 @@ class S3Client { ); } - /// This action lists in-progress multipart uploads. 
An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted. + /// This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the `CreateMultipartUpload` request, but has not yet been completed or aborted. + /// + /// **Directory buckets** \- If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. + /// + /// The `ListMultipartUploads` operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` request parameter. If there are more than 1,000 multipart uploads that satisfy your `ListMultipartUploads` request, the response returns an `IsTruncated` element with the value of `true`, a `NextKeyMarker` element, and a `NextUploadIdMarker` element. To list the remaining multipart uploads, you need to make subsequent `ListMultipartUploads` requests. In these requests, include two query parameters: `key-marker` and `upload-id-marker`. Set the value of `key-marker` to the `NextKeyMarker` value from the previous response. Similarly, set the value of `upload-id-marker` to the `NextUploadIdMarker` value from the previous response. + /// + /// **Directory buckets** \- The `upload-id-marker` element and the `NextUploadIdMarker` element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of `key-marker` to the `NextKeyMarker` value from the previous response. + /// + /// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the `max-uploads` parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an `IsTruncated` element with the value true. To list the additional multipart uploads, use the `key-marker` and `upload-id-marker` request parameters. 
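The marker-based pagination described above can be sketched in Dart roughly as follows; the member names (`keyMarker`, `uploadIdMarker`, `isTruncated`, `nextKeyMarker`, `nextUploadIdMarker`), the import path, and the `.result` accessor are assumptions about the regenerated client, and the bucket name is a placeholder. For directory buckets, only the `key-marker` half of the pair would be used.

```dart
// Illustrative sketch only: follow the marker-based pagination described
// above until IsTruncated is false. Import path, member names, and `.result`
// are assumptions about the regenerated client; the bucket is a placeholder.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> listAllInProgressUploads(S3Client s3) async {
  String? keyMarker;
  String? uploadIdMarker;
  var truncated = true;
  while (truncated) {
    final page = await s3
        .listMultipartUploads(
          ListMultipartUploadsRequest(
            bucket: 'examplebucket',
            keyMarker: keyMarker,
            uploadIdMarker: uploadIdMarker, // not applicable to directory buckets
          ),
        )
        .result;
    final uploads = page.uploads;
    if (uploads != null) {
      for (final upload in uploads) {
        print('${upload.key} (upload ID: ${upload.uploadId})');
      }
    }
    truncated = page.isTruncated ?? false;
    keyMarker = page.nextKeyMarker;
    uploadIdMarker = page.nextUploadIdMarker;
  }
}
```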
+ /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time. /// - /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). + /// Sorting of multipart uploads in response /// - /// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// * **General purpose bucket** \- In the `ListMultipartUploads` response, the multipart uploads are sorted based on two criteria: + /// + /// * Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys. + /// + /// * Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later. + /// + /// * **Directory bucket** \- In the `ListMultipartUploads` response, the multipart uploads aren't sorted lexicographically based on the object keys. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListMultipartUploads`: /// @@ -739,15 +754,29 @@ class S3Client { ); } - /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list. For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. + /// Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
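As a brief sketch of using request parameters as selection criteria, the snippet below lists one "folder" level under a prefix with the regenerated client; the member names mirror the S3 API (`prefix`, `delimiter`, `maxKeys`, `contents`) and are assumed to carry over to the generated Dart models, with placeholder bucket and prefix values.

```dart
// Brief sketch only: request parameters as selection criteria, listing one
// "folder" level under a prefix. Import path and member names are assumptions
// about the regenerated client; bucket and prefix are placeholders.
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

Future<void> listFebruaryPhotos(S3Client s3) async {
  final page = await s3
      .listObjectsV2(
        ListObjectsV2Request(
          bucket: 'examplebucket',
          prefix: 'photos/2006/February/',
          delimiter: '/',
          maxKeys: 100,
        ),
      )
      .result;
  final contents = page.contents;
  if (contents != null) {
    for (final object in contents) {
      print('${object.key} (${object.size} bytes)');
    }
  }
}
```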
A `200 OK` response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see [Listing object keys programmatically](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) in the _Amazon S3 User Guide_. To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). /// - /// To use this operation, you must have READ access to the bucket. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// To use this action in an Identity and Access Management (IAM) policy, you must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. + /// Permissions /// - /// This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). + /// * **General purpose bucket permissions** \- To use this operation, you must have READ access to the bucket. You must have permission to perform the `s3:ListBucket` action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) in the _Amazon S3 User Guide_. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. 
Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Sorting order of returned objects + /// + /// * **General purpose bucket** \- For general purpose buckets, `ListObjectsV2` returns objects in lexicographical order based on their key names. /// - /// To get a list of your buckets, see [ListBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). + /// * **Directory bucket** \- For directory buckets, `ListObjectsV2` does not return objects in lexicographical order. + /// + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. + /// + /// This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). /// /// The following operations are related to `ListObjectsV2`: /// @@ -776,13 +805,28 @@ class S3Client { ); } - /// Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of true, and a `NextPartNumberMarker` element. In subsequent `ListParts` requests you can include the part-number-marker query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. + /// Lists the parts that have been uploaded for a specific multipart upload. + /// + /// To use this operation, you must provide the `upload ID` in the request. You obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). + /// + /// The `ListParts` request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the `max-parts` request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an `IsTruncated` field with the value of `true`, and a `NextPartNumberMarker` element. To list remaining uploaded parts, in subsequent `ListParts` requests, include the `part-number-marker` query string parameter and set its value to the `NextPartNumberMarker` field value from the previous response. + /// + /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// If the upload was created using a checksum algorithm, you will need to have permission to the `kms:Decrypt` action for the request to succeed. + /// If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the `kms:Decrypt` action for the `ListParts` request to succeed. /// - /// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// - /// For information on permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). + /// + /// HTTP Host header syntax + /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `ListParts`: /// @@ -817,34 +861,49 @@ class S3Client { ); } - /// Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. + /// Adds an object to a bucket. + /// + /// * Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. + /// + /// * If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner. + /// + /// * **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. + /// /// - /// Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use `PutObject` to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. + /// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior: /// - /// Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). + /// * **S3 Object Lock** \- To prevent objects from being deleted or overwritten, you can use [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) in the _Amazon S3 User Guide_. + /// + /// This functionality is not supported for directory buckets. + /// + /// * **S3 Versioning** \- When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) in the _Amazon S3 User Guide_. For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// Permissions /// - /// To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. + /// * **General purpose bucket permissions** \- The following permissions are required in your policies when your `PutObject` request includes specific headers. /// - /// * To successfully complete the `PutObject` request, you must have the `s3:PutObject` in your IAM permissions. + /// * **`s3:PutObject`** \- To successfully complete the `PutObject` request, you must always have the `s3:PutObject` permission on a bucket to add an object to it. /// - /// * To successfully change the objects acl of your `PutObject` request, you must have the `s3:PutObjectAcl` in your IAM permissions. + /// * **`s3:PutObjectAcl`** \- To successfully change the objects ACL of your `PutObject` request, you must have the `s3:PutObjectAcl`. 
/// - /// * To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging` in your IAM permissions. + /// * **`s3:PutObjectTagging`** \- To successfully set the tag-set with your `PutObject` request, you must have the `s3:PutObjectTagging`. /// - /// * The `Content-MD5` header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in the _Amazon S3 User Guide_. + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . /// /// - /// You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). + /// Data integrity with Content-MD5 /// - /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see [Access Control List (ACL) Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and [Managing ACLs Using the REST API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). + /// * **General purpose bucket** \- To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. /// - /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. 
Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the `bucket-owner-full-control` canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a `400` error with the error code `AccessControlListNotSupported`. For more information, see [Controlling ownership of objects and disabling ACLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) in the _Amazon S3 User Guide_. + /// * **Directory bucket** \- This functionality is not supported for directory buckets. /// - /// If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. /// - /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see [Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in the _Amazon S3 User Guide_. + /// HTTP Host header syntax /// - /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). For information about returning the versioning state of a bucket, see [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// For more information about related Amazon S3 APIs, see the following: /// @@ -870,15 +929,17 @@ class S3Client { ); } + /// This operation is not supported by directory buckets. + /// /// This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. /// - /// This action is not supported by Amazon S3 on Outposts. + /// This functionality is not supported for Amazon S3 on Outposts. /// /// For more information about Amazon S3 Select, see [Selecting Content from Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) and [SELECT Command](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) in the _Amazon S3 User Guide_. /// /// Permissions /// - /// You must have `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. 
For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. + /// You must have the `s3:GetObject` permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see [Specifying Permissions in a Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in the _Amazon S3 User Guide_. /// /// Object Data Formats /// @@ -942,47 +1003,66 @@ class S3Client { /// Uploads a part in a multipart upload. /// - /// In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. + /// In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) operation. /// - /// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request. + /// You must initiate a multipart upload (see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. /// /// Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. /// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. /// - /// To ensure that data is not corrupted when traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. + /// After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. /// - /// If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). 
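As a rough illustration of the request shape just described, the hedged Dart sketch below uploads a single part and returns the ETag that a later `CompleteMultipartUpload` call would need. The import path, the `build` factory, the `.result` future, and the streaming `body` member modeled as a `Stream<List<int>>` are assumptions about the regenerated SDK rather than confirmed signatures.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

/// Uploads one part of an in-progress multipart upload and returns its ETag.
/// Part numbers run from 1 to 10,000; reusing a number overwrites that part.
Future<String?> uploadOnePart(
  S3Client client, {
  required String bucket,
  required String key,
  required String uploadId,
  required int partNumber,
  required List<int> bytes,
}) async {
  final output = await client
      .uploadPart(
        UploadPartRequest.build((b) {
          b
            ..bucket = bucket
            ..key = key
            ..uploadId = uploadId
            ..partNumber = partNumber
            // Assumed shape of the streaming payload member.
            ..body = Stream.value(bytes);
        }),
      )
      .result;
  return output.eTag;
}
```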
+ /// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . /// - /// **Note:** After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// For more information on multipart uploads, go to [Multipart Upload Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the _Amazon S3 User Guide_ . + /// Permissions + /// + /// * **General purpose bucket permissions** \- For information on the permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// + /// * **Directory bucket permissions** \- To grant access to this API operation on a directory bucket, we recommend that you use the [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) API operation for session-based authorization. Specifically, you grant the `s3express:CreateSession` permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the `CreateSession` API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another `CreateSession` API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see [`CreateSession`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) . + /// + /// + /// Data integrity + /// + /// **General purpose bucket** \- To ensure that data is not corrupted traversing the network, specify the `Content-MD5` header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the `x-amz-content-sha256` header as a checksum instead of `Content-MD5`. For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). + /// + /// **Directory buckets** \- MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. + /// + /// Encryption + /// + /// * **General purpose bucket** \- Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. 
You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). /// - /// For information on the permissions required to use the multipart upload API, go to [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). /// - /// Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). For more information, go to [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. + /// If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. /// - /// Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). 
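To make the ordering concrete, here is a hedged Dart sketch of the surrounding lifecycle: encryption parameters (other than SSE-C) would be supplied only on the `CreateMultipartUpload` request, the parts are then uploaded without repeating them, and the upload is aborted on failure so the stored parts stop accruing charges. The `CompleteMultipartUpload` step, which assembles the list of part numbers and ETags, is omitted, and the import path, `build` factories, `.result` futures, and payload shape remain assumptions, as in the earlier sketches.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

/// Skeleton of a multipart upload: initiate, upload parts, abort on failure.
Future<void> multipartUploadSkeleton(
  S3Client client, {
  required String bucket,
  required String key,
  required List<List<int>> partData,
}) async {
  // 1. Initiate. Any SSE-S3 or SSE-KMS settings belong on this request only;
  //    they are not repeated on the individual UploadPart requests.
  final created = await client
      .createMultipartUpload(
        CreateMultipartUploadRequest.build((b) {
          b
            ..bucket = bucket
            ..key = key;
        }),
      )
      .result;
  final uploadId = created.uploadId!;
  try {
    // 2. Upload each part; part numbers start at 1.
    for (var i = 0; i < partData.length; i++) {
      await client
          .uploadPart(
            UploadPartRequest.build((b) {
              b
                ..bucket = bucket
                ..key = key
                ..uploadId = uploadId
                ..partNumber = i + 1
                ..body = Stream.value(partData[i]); // assumed payload shape
            }),
          )
          .result;
    }
    // 3. A complete flow would now call completeMultipartUpload with the
    //    collected part numbers and ETags (omitted from this sketch).
  } catch (_) {
    // 4. Abort so Amazon S3 frees the stored parts and stops charging for them.
    await client
        .abortMultipartUpload(
          AbortMultipartUploadRequest.build((b) {
            b
              ..bucket = bucket
              ..key = key
              ..uploadId = uploadId;
          }),
        )
        .result;
    rethrow;
  }
}
```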
+ /// * x-amz-server-side-encryption-customer-algorithm /// - /// If you requested server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers. + /// * x-amz-server-side-encryption-customer-key /// - /// * x-amz-server-side-encryption-customer-algorithm + /// * x-amz-server-side-encryption-customer-key-MD5 /// - /// * x-amz-server-side-encryption-customer-key + /// * **Directory bucket** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. /// - /// * x-amz-server-side-encryption-customer-key-MD5 /// + /// For more information, see [Using Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the _Amazon S3 User Guide_. /// - /// `UploadPart` has the following special errors: + /// Special errors + /// + /// * Error Code: `NoSuchUpload` + /// + /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. /// - /// * * _Code: NoSuchUpload_ + /// * HTTP Status Code: 404 Not Found /// - /// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ + /// * SOAP Fault Code Prefix: Client /// - /// * _HTTP Status Code: 404 Not Found_ /// - /// * _SOAP Fault Code Prefix: Client_ + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPart`: /// @@ -1014,66 +1094,74 @@ class S3Client { ); } - /// Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header `x-amz-copy-source` in your request and a byte range by adding the request header `x-amz-copy-source-range` in your request. + /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header `x-amz-copy-source` in your request. To specify a byte range, you add the request header `x-amz-copy-source-range` in your request. /// /// For information about maximum and minimum part sizes and other multipart upload specifications, see [Multipart upload limits](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the _Amazon S3 User Guide_. /// - /// Instead of using an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and provide data in your request. + /// Instead of copying data from an existing object as part data, you might use the [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action to upload new data as a part of an object in your request. /// - /// You must initiate a multipart upload before you can upload any part. In response to your initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request. + /// You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. 
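A hedged Dart sketch of the copy variant follows, surfacing the `x-amz-copy-source` and `x-amz-copy-source-range` headers through `copySource` and `copySourceRange` members on the request model. Those member names, the import path, the `build` factory, and the `.result` future are assumptions about the regenerated SDK rather than confirmed signatures.

```dart
import 'package:amplify_storage_s3_dart/src/sdk/s3.dart';

/// Copies a byte range of an existing object into one part of an
/// in-progress multipart upload and returns the resulting part's ETag.
Future<String?> copyRangeAsPart(
  S3Client client, {
  required String sourceBucket,
  required String sourceKey,
  required String destinationBucket,
  required String destinationKey,
  required String uploadId,
  required int partNumber,
  required int firstByte,
  required int lastByte,
}) async {
  final output = await client
      .uploadPartCopy(
        UploadPartCopyRequest.build((b) {
          b
            ..bucket = destinationBucket
            ..key = destinationKey
            ..uploadId = uploadId
            ..partNumber = partNumber
            // Maps to the x-amz-copy-source header; a ?versionId= suffix may
            // be appended to copy from a specific source object version.
            ..copySource = '/$sourceBucket/$sourceKey'
            // Maps to the x-amz-copy-source-range header (inclusive range).
            ..copySourceRange = 'bytes=$firstByte-$lastByte';
        }),
      )
      .result;
  return output.copyPartResult?.eTag;
}
```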
/// - /// For more information about using the `UploadPartCopy` operation, see the following: + /// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. /// - /// * For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the _Amazon S3 User Guide_. + /// **Directory buckets** \- For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format `https://_bucket_name_.s3express-_az_id_._region_.amazonaws.com/_key-name_` . Path-style requests are not supported. For more information, see [Regional and Zonal endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) in the _Amazon S3 User Guide_. /// - /// * For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. + /// Authentication and authorization /// - /// * For information about copying objects using a single atomic action vs. a multipart upload, see [Operations on Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the _Amazon S3 User Guide_. + /// All `UploadPartCopy` requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). /// - /// * For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). + /// **Directory buckets** \- You must use IAM credentials to authenticate and authorize your access to the `UploadPartCopy` API operation, instead of using the temporary security credentials through the `CreateSession` API operation. /// + /// Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. /// - /// Note the following additional considerations about the request headers `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, and `x-amz-copy-source-if-modified-since`: + /// Permissions /// - /// * **Consideration 1** \- If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the request as follows: + /// You must have `READ` access to the source object and `WRITE` access to the destination bucket. /// - /// `x-amz-copy-source-if-match` condition evaluates to `true`, and; + /// * **General purpose bucket permissions** \- You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an `UploadPartCopy` operation. 
/// - /// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`; + /// * If the source object is in a general purpose bucket, you must have the **`s3:GetObject`** permission to read the source object that is being copied. /// - /// Amazon S3 returns `200 OK` and copies the data. + /// * If the destination bucket is a general purpose bucket, you must have the **`s3:PutObject`** permission to write the object copy to the destination bucket. /// - /// * **Consideration 2** \- If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in the request as follows: /// - /// `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; + /// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the _Amazon S3 User Guide_. /// - /// `x-amz-copy-source-if-modified-since` condition evaluates to `true`; + /// * **Directory bucket permissions** \- You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an `UploadPartCopy` operation. /// - /// Amazon S3 returns `412 Precondition Failed` response code. + /// * If the source object that you want to copy is in a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to read the object. By default, the session is in the `ReadWrite` mode. If you want to restrict access, you can explicitly set the `s3express:SessionMode` condition key to `ReadOnly` on the copy source bucket. /// + /// * If the copy destination is a directory bucket, you must have the **`s3express:CreateSession`** permission in the `Action` element of a policy to write the object to the destination. The `s3express:SessionMode` condition key cannot be set to `ReadOnly` on the copy destination. /// - /// Versioning /// - /// If your bucket has versioning enabled, you could have multiple versions of the same object. By default, `x-amz-copy-source` identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the `x-amz-copy-source`, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the `x-amz-copy-source` and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the `x-amz-copy-source`. + /// For example policies, see [Example bucket policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) in the _Amazon S3 User Guide_.
/// - /// You can optionally specify a specific version of the source object to copy by adding the `versionId` subresource as shown in the following example: /// - /// `x-amz-copy-source: /bucket/object?versionId=version id` + /// Encryption + /// + /// * **General purpose buckets** \- For information about using server-side encryption with customer-provided encryption keys with the `UploadPartCopy` operation, see [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). + /// + /// * **Directory buckets** \- For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is supported. + /// /// /// Special errors /// - /// * * _Code: NoSuchUpload_ + /// * Error Code: `NoSuchUpload` + /// + /// * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. + /// + /// * HTTP Status Code: 404 Not Found /// - /// * _Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed._ + /// * Error Code: `InvalidRequest` /// - /// * _HTTP Status Code: 404 Not Found_ + /// * Description: The specified copy source is not supported as a byte-range copy source. /// - /// * * _Code: InvalidRequest_ + /// * HTTP Status Code: 400 Bad Request /// - /// * _Cause: The specified copy source is not supported as a byte-range copy source._ /// - /// * _HTTP Status Code: 400 Bad Request_ + /// HTTP Host header syntax /// + /// **Directory buckets** \- The HTTP Host header syntax is `_Bucket_name_.s3express-_az_id_._region_.amazonaws.com`. /// /// The following operations are related to `UploadPartCopy`: ///