diff --git a/.doc_gen/metadata/s3-directory-buckets_metadata.yaml b/.doc_gen/metadata/s3-directory-buckets_metadata.yaml new file mode 100644 index 00000000000..8c29794f7ed --- /dev/null +++ b/.doc_gen/metadata/s3-directory-buckets_metadata.yaml @@ -0,0 +1,434 @@ +s3-directory-buckets_Hello: + title: Hello &S3; Directory Buckets + title_abbrev: Hello &S3; Directory Buckets + synopsis: get started using &S3; directory buckets. + category: Hello + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: + snippet_tags: + - s3directorybuckets.java2.directory_bucket_hello.main + services: + s3-directory-buckets: {CreateBucket, ListDirectoryBuckets} + +s3-directory-buckets_CreateBucket: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Create an S3 directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_create_bucket.import + - s3directorybuckets.java2.directory_bucket_create_bucket.main + services: + s3-directory-buckets: {CreateBucket} + +s3-directory-buckets_AbortMultipartUpload: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Abort a multipart upload in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.abortmultipartupload.import + - s3directorybuckets.java2.abortmultipartupload.main + services: + s3-directory-buckets: {AbortMultipartUpload} + +s3-directory-buckets_CompleteMultipartUpload: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Complete a multipart upload in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.completedirectorybucketmultipartupload.import + - s3directorybuckets.java2.completedirectorybucketmultipartupload.main + services: + s3-directory-buckets: {CompleteMultipartUpload} + +s3-directory-buckets_CopyObject: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Copy an object from one directory bucket to another. + snippet_tags: + - s3directorybuckets.java2.copydirectorybucketobject.import + - s3directorybuckets.java2.copydirectorybucketobject.main + services: + s3-directory-buckets: {CopyObject} + +s3-directory-buckets_CreateMultipartUpload: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Create a multipart upload in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.create_directory_bucket_multipart_upload.import + - s3directorybuckets.java2.directory_bucket_create_multipartupload.main + services: + s3-directory-buckets: {CreateMultipartUpload} + +s3-directory-buckets_DeleteBucket: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Delete an S3 directory bucket.
+ snippet_tags: + - s3directorybuckets.java2.delete_directory_bucket.import + - s3directorybuckets.java2.directory_bucket_delete.main + services: + s3-directory-buckets: {DeleteBucket} + +s3-directory-buckets_DeleteBucketEncryption: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Delete the encryption configuration for a directory bucket. + snippet_tags: + - s3directorybuckets.java2.delete_directory_bucket_encryption.import + - s3directorybuckets.java2.directory_bucket_delete_bucket_encryption.main + services: + s3-directory-buckets: {DeleteBucketEncryption} + +s3-directory-buckets_DeleteObject: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Delete an object in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_delete_object.import + - s3directorybuckets.java2.directory_bucket_delete_object.main + services: + s3-directory-buckets: {DeleteObject} + +s3-directory-buckets_DeleteObjects: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Delete multiple objects in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_delete_objects.import + - s3directorybuckets.java2.directory_bucket_delete_objects.main + services: + s3-directory-buckets: {DeleteObjects} + +s3-directory-buckets_DeleteBucketPolicy: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Delete a bucket policy for a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_delete_policy.import + - s3directorybuckets.java2.directory_bucket_delete_policy.main + services: + s3-directory-buckets: {DeleteBucketPolicy} + +s3-directory-buckets_GeneratePresignedGetURLForDirectoryBucket: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Generate a presigned GET URL for accessing an object in an S3 directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_generate_presigned_url.import + - s3directorybuckets.java2.directory_bucket_generate_presigned_url.main + services: + s3-directory-buckets: {GetObject} + +s3-directory-buckets_GetBucketEncryption: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Get the encryption configuration of a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_get_encryption.import + - s3directorybuckets.java2.directory_bucket_get_encryption.main + services: + s3-directory-buckets: {GetBucketEncryption} + +s3-directory-buckets_GetObject: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Get an object from a directory bucket. 
+ snippet_tags: + - s3directorybuckets.java2.directory_bucket_get_object.import + - s3directorybuckets.java2.directory_bucket_get_object.main + services: + s3-directory-buckets: {GetObject} + +s3-directory-buckets_GetObjectAttributes: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Get the attributes of an object in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_get_object_attributes.import + - s3directorybuckets.java2.directory_bucket_get_object_attributes.main + services: + s3-directory-buckets: {GetObjectAttributes} + +s3-directory-buckets_GetBucketPolicy: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Get the policy of a directory bucket. + snippet_tags: + - s3directorybuckets.java2.get_directory_bucket_policy.import + - s3directorybuckets.java2.directory_bucket_get_policy.main + services: + s3-directory-buckets: {GetBucketPolicy} + +s3-directory-buckets_HeadBucket: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Check whether the specified S3 directory bucket exists and is accessible. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_head_bucket.import + - s3directorybuckets.java2.directory_bucket_head_bucket.main + services: + s3-directory-buckets: {HeadBucket} + +s3-directory-buckets_HeadObject: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Get metadata of an object in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_head_object.import + - s3directorybuckets.java2.directory_bucket_head_object.main + services: + s3-directory-buckets: {HeadObject} + +s3-directory-buckets_ListMultipartUploads: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: List multipart uploads in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_list_multipart_upload.import + - s3directorybuckets.java2.directory_bucket_list_multipart_upload.main + services: + s3-directory-buckets: {ListMultipartUploads} + +s3-directory-buckets_ListObjectsV2: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: List objects in a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_list_objects_v2.import + - s3directorybuckets.java2.directory_bucket_list_objects_v2.main + services: + s3-directory-buckets: {ListObjectsV2} + +s3-directory-buckets_ListParts: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: List parts of a multipart upload in a directory bucket.
+ snippet_tags: + - s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.import + - s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.main + services: + s3-directory-buckets: {ListParts} + +s3-directory-buckets_ListDirectoryBuckets: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: List all directory buckets. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_list_buckets.import + - s3directorybuckets.java2.directory_bucket_list_buckets.main + services: + s3-directory-buckets: {ListDirectoryBuckets} + +s3-directory-buckets_PutBucketEncryption: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Set the encryption configuration for a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_put_encryption.import + - s3directorybuckets.java2.directory_bucket_put_encryption.main + services: + s3-directory-buckets: {PutBucketEncryption} + +s3-directory-buckets_PutObject: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Put an object into a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_put_object.import + - s3directorybuckets.java2.directory_bucket_put_object.main + services: + s3-directory-buckets: {PutObject} + +s3-directory-buckets_PutBucketPolicy: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Apply a bucket policy to a directory bucket. + snippet_tags: + - s3directorybuckets.java2.put_directory_bucket_policy.import + - s3directorybuckets.java2.directory_bucket_put_bucket_policy.main + services: + s3-directory-buckets: {PutBucketPolicy} + +s3-directory-buckets_UploadPartCopy: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Split a copy into parts based on the source object size and copy the individual parts to a directory bucket. + snippet_tags: + - s3directorybuckets.java2.directory_bucket_upload_part_copy.import + - s3directorybuckets.java2.directory_bucket_upload_part_copy.main + services: + s3-directory-buckets: {UploadPartCopy} + +s3-directory-buckets_UploadPart: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3/src/main/java/com/example/s3/directorybucket + sdkguide: + excerpts: + - description: Upload a part of a multipart upload for a directory bucket.
+ snippet_tags: + - s3directorybuckets.java2.directory_bucket_upload_part.import + - s3directorybuckets.java2.directory_bucket_upload_part.main + services: + s3-directory-buckets: {UploadPart} + +s3-directory-buckets_Scenario_ExpressBasics: + title: Learn the basics of Amazon S3 Express One Zone with an &AWS; SDK + title_abbrev: Learn the basics of S3 Express One Zone + synopsis_list: + - Set up a VPC and VPC Endpoint + - Set up the S3 Express Policies, Roles, and User to work with S3 Express buckets + - Create two S3 Clients + - Create two buckets + - Create an object and copy it over + - Demonstrate performance difference + - Populate the buckets to show the lexicographical difference + - Prompt the user to see if they want to clean up the resources + category: Basics + languages: + PHP: + versions: + - sdk_version: 3 + github: php/example_code/s3 + sdkguide: + excerpts: + - description: Run a scenario demonstrating the basics of &S3; Express One Zone. + snippet_tags: + - php.example_code.s3.ExpressBasics + - php.example_code.s3.service.S3Service + services: + s3-directory-buckets: {CreateBucket, CopyObject, GetObject, PutObject, ListObjects, DeleteObject, DeleteBucket} \ No newline at end of file diff --git a/.doc_gen/metadata/s3_metadata.yaml b/.doc_gen/metadata/s3_metadata.yaml index 820cbc490a0..f8f183fc82b 100644 --- a/.doc_gen/metadata/s3_metadata.yaml +++ b/.doc_gen/metadata/s3_metadata.yaml @@ -3481,32 +3481,6 @@ s3_Scenario_ConditionalRequests: - python.example_code.s3.S3ConditionalRequests.wrapper services: s3: {GetObject, PutObject, CopyObject} -s3_Scenario_ExpressBasics: - title: Learn the basics of Amazon S3 Express One Zone with an &AWS; SDK - title_abbrev: Learn the basics of S3 Express One Zone - synopsis_list: - - Set up a VPC and VPC Endpoint - - Set up the S3 Express Policies, Roles, and User to work with S3 Express buckets - - Create two S3 Clients - - Create two buckets - - Create an object and copy it over - - Demonstrate performance difference - - Populate the buckets to show the lexicographical difference - - Prompt the user to see if they want to clean up the resources - category: Basics - languages: - PHP: - versions: - - sdk_version: 3 - github: php/example_code/s3 - sdkguide: - excerpts: - - description: - snippet_tags: - - php.example_code.s3.ExpressBasics - - php.example_code.s3.service.S3Service - services: - s3: {CreateVpc, DescribeRouteTables, CreateVpcEndpoint, CreateBucket, CopyObject, GetObject, PutObject, ListObjects, DeleteObject, DeleteBucket, DeleteVpcEndpoint, DeleteVpc} s3_Scenario_DownloadS3Directory: title: Download S3 'directories' from an &S3long; (&S3;) bucket title_abbrev: Download S3 'directories' diff --git a/.tools/readmes/config.py b/.tools/readmes/config.py index 0420b7da49a..4d3855c85b1 100644 --- a/.tools/readmes/config.py +++ b/.tools/readmes/config.py @@ -54,6 +54,7 @@ "sdk_api_ref": 'https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/{{service["name"]}}/package-summary.html', "service_folder_overrides": { "s3-control": "javav2/example_code/s3/src/main/java/com/example/s3/batch", + "s3-directory-buckets": "javav2/example_code/s3/src/main/java/com/example/s3/directorybucket", "medical-imaging": "javav2/example_code/medicalimaging", }, }, diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/AbortDirectoryBucketMultipartUploads.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/AbortDirectoryBucketMultipartUploads.java new file mode 100644 index 
00000000000..f0f01955f89 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/AbortDirectoryBucketMultipartUploads.java @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.abortmultipartupload.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.abortmultipartupload.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

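+ * Directory bucket names must follow the format "base-name--zone-id--x-s3" (for example, "test-bucket--usw2-az1--x-s3"); the main method below builds its bucket name this way. + *
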
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class AbortDirectoryBucketMultipartUploads { + private static final Logger logger = LoggerFactory.getLogger(AbortDirectoryBucketMultipartUploads.class); + + // snippet-start:[s3directorybuckets.java2.abortmultipartupload.main] + + /** + * Aborts a specific multipart upload for the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @param uploadId The upload ID of the multipart upload to abort + * @return True if the multipart upload is successfully aborted, false otherwise + */ + public static boolean abortDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, + String objectKey, String uploadId) { + logger.info("Aborting multipart upload: {} for bucket: {}", uploadId, bucketName); + try { + // Abort the multipart upload + AbortMultipartUploadRequest abortMultipartUploadRequest = AbortMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectKey) + .uploadId(uploadId) + .build(); + + s3Client.abortMultipartUpload(abortMultipartUploadRequest); + logger.info("Aborted multipart upload: {} for object: {}", uploadId, objectKey); + return true; + } catch (S3Exception e) { + logger.error("Failed to abort multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + return false; + } + } + // snippet-end:[s3directorybuckets.java2.abortmultipartupload.main] + + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "largeObject"; // your-object-key + String uploadId; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Create a Multipart Upload Request + uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey); + + // Abort Multipart Uploads + boolean aborted = abortDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId); + if (aborted) { + logger.info("Multipart upload successfully aborted for bucket: {}", bucketName); + } else { + logger.error("Failed to abort multipart upload for bucket: {}", bucketName); + } + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + // Tear down by deleting the bucket + try { + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete the bucket due to S3 error: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CompleteDirectoryBucketMultipartUpload.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CompleteDirectoryBucketMultipartUpload.java new file mode 100644 index 
00000000000..62f8df93a3f --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CompleteDirectoryBucketMultipartUpload.java @@ -0,0 +1,146 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; + +import com.example.s3.util.S3DirectoryBucketUtils; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

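+ * When completing a multipart upload, the list of completed parts must be sorted in ascending part-number order, and every part except the last must be at least 5 MiB. + *
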
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class CompleteDirectoryBucketMultipartUpload { + private static final Logger logger = LoggerFactory.getLogger(CompleteDirectoryBucketMultipartUpload.class); + + // snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main] + + /** + * Completes the multipart upload by assembling the previously uploaded + * parts. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @param uploadId The upload ID used to track the multipart upload + * @param uploadParts The list of completed parts + * @return True if the multipart upload is successfully completed, false + * otherwise + */ + public static boolean completeDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey, + String uploadId, List<CompletedPart> uploadParts) { + try { + CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder() + .parts(uploadParts) + .build(); + CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectKey) + .uploadId(uploadId) + .multipartUpload(completedMultipartUpload) + .build(); + + CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload(completeMultipartUploadRequest); + logger.info("Multipart upload completed. ETag: {}", response.eTag()); + return true; + } catch (S3Exception e) { + logger.error("Failed to complete multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + return false; + } + } + // snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String uploadId; + String objectKey = "largeObject"; + Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg"); + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Create a multipart upload + uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey); + // Perform multipart upload for the directory bucket + List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey, + uploadId, filePath); + logger.info("Uploaded parts: {}", uploadedParts); + // Complete the multipart upload + boolean completed = completeDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId, + uploadedParts); + if (completed) { + logger.info("Multipart upload successfully completed for bucket: {}", bucketName); + } else { + logger.error("Failed to complete multipart upload for bucket: {}", bucketName); + } + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } catch (IOException e) { + logger.error("An I/O error occurred: {}", e.getMessage()); + } finally { + // Clean up resources + try { + logger.info("Starting
cleanup for bucket: {}", bucketName); + S3DirectoryBucketUtils.abortDirectoryBucketMultipartUploads(s3Client, bucketName); + deleteAllObjectsInDirectoryBucket(s3Client, bucketName); + deleteDirectoryBucket(s3Client, bucketName); + logger.info("Cleanup completed for bucket: {}", bucketName); + } catch (S3Exception e) { + logger.error("Error during cleanup: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Unexpected error during cleanup: {}", e.getMessage()); + } finally { + // Close the S3 client + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CopyDirectoryBucketObject.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CopyDirectoryBucketObject.java new file mode 100644 index 00000000000..4d03a3a3de6 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CopyDirectoryBucketObject.java @@ -0,0 +1,124 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.copydirectorybucketobject.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.CopyObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; +import java.nio.file.Path; +import java.nio.file.Paths; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.copydirectorybucketobject.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

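+ * Requests to directory buckets are authorized through the s3express:CreateSession action, so the caller needs that permission on both the source and destination buckets for the copy to succeed. + *
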
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class CopyDirectoryBucketObject { + private static final Logger logger = LoggerFactory.getLogger(CopyDirectoryBucketObject.class); + + // snippet-start:[s3directorybuckets.java2.copydirectorybucketobject.main] + /** + * Copies an object from one S3 directory bucket to another S3 directory + * bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param sourceBucket The name of the source bucket + * @param objectKey The key (name) of the object to be copied + * @param targetBucket The name of the target bucket + */ + public static void copyDirectoryBucketObject(S3Client s3Client, String sourceBucket, String objectKey, + String targetBucket) { + logger.info("Copying object: {} from bucket: {} to bucket: {}", objectKey, sourceBucket, targetBucket); + + try { + // Create a CopyObjectRequest + CopyObjectRequest copyReq = CopyObjectRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(objectKey) + .destinationBucket(targetBucket) + .destinationKey(objectKey) + .build(); + + // Copy the object + CopyObjectResponse copyRes = s3Client.copyObject(copyReq); + logger.info("Successfully copied {} from bucket {} into bucket {}. CopyObjectResponse: {}", + objectKey, sourceBucket, targetBucket, copyRes.copyObjectResult().toString()); + + } catch (S3Exception e) { + logger.error("Failed to copy object: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.copydirectorybucketobject.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String sourceDirectoryBucket = "test-source-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String targetDirectoryBucket = "test-destination-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "example-object"; // your-object-key + Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + + try { + // Create the source and target directory buckets + createDirectoryBucket(s3Client, sourceDirectoryBucket, zone); + createDirectoryBucket(s3Client, targetDirectoryBucket, zone); + // Put an object in the source bucket + putDirectoryBucketObject(s3Client, sourceDirectoryBucket, objectKey, filePath); + // Copy object from the source directory bucket to the target directory bucket + copyDirectoryBucketObject(s3Client, sourceDirectoryBucket, objectKey, targetDirectoryBucket); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + // Clean up resources + try { + logger.info("Starting cleanup for buckets: {} and {}", sourceDirectoryBucket, targetDirectoryBucket); + deleteAllObjectsInDirectoryBucket(s3Client, sourceDirectoryBucket); + deleteAllObjectsInDirectoryBucket(s3Client, targetDirectoryBucket); + deleteDirectoryBucket(s3Client, sourceDirectoryBucket); + deleteDirectoryBucket(s3Client, targetDirectoryBucket); + logger.info("Cleanup completed for buckets: {} and {}", sourceDirectoryBucket, targetDirectoryBucket); + } catch (S3Exception
e) { + logger.error("Error during cleanup: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Unexpected error during cleanup: {}", e.getMessage()); + } finally { + // Close the S3 client + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucket.java new file mode 100644 index 00000000000..63dc9d0a292 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucket.java @@ -0,0 +1,103 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_create_bucket.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.directory_bucket_create_bucket.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

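+ * The Zone ID embedded in the bucket name (for example, "usw2-az1") must match the Zone ID passed in the CreateBucketConfiguration, or the request fails. + *
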
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class CreateDirectoryBucket { + private static final Logger logger = LoggerFactory.getLogger(CreateDirectoryBucket.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_create_bucket.main] + /** + * Creates a new S3 directory bucket in the specified Zone (in this example, + * an Availability Zone). + * + * @param s3Client The S3 client used to create the bucket + * @param bucketName The name of the bucket to be created + * @param zone The Zone ID where the bucket will be created + * @throws S3Exception if there's an error creating the bucket + */ + public static void createDirectoryBucket(S3Client s3Client, String bucketName, String zone) throws S3Exception { + logger.info("Creating bucket: {}", bucketName); + + CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() + .location(LocationInfo.builder() + .type(LocationType.AVAILABILITY_ZONE) + .name(zone).build()) + .bucket(BucketInfo.builder() + .type(BucketType.DIRECTORY) + .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) + .build()) + .build(); + try { + CreateBucketRequest bucketRequest = CreateBucketRequest.builder() + .bucket(bucketName) + .createBucketConfiguration(bucketConfiguration).build(); + CreateBucketResponse response = s3Client.createBucket(bucketRequest); + logger.info("Bucket created successfully with location: {}", response.location()); + } catch (S3Exception e) { + logger.error("Error creating bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_create_bucket.main] + + public static void main(String[] args) { + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3"; + Region region = Region.US_WEST_2; + String zone = "usw2-az1"; + S3Client s3Client = createS3Client(region); + + try { + createDirectoryBucket(s3Client, bucketName, zone); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete the bucket due to S3 error: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucketMultipartUpload.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucketMultipartUpload.java new file mode 100644 index 00000000000..6e63cb5b3c7 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/CreateDirectoryBucketMultipartUpload.java @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.create_directory_bucket_multipart_upload.import] +import com.example.s3.util.S3DirectoryBucketUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.create_directory_bucket_multipart_upload.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

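+ * The upload ID returned here must be supplied to every subsequent UploadPart, CompleteMultipartUpload, or AbortMultipartUpload call; parts of an incomplete upload incur storage charges until the upload is completed or aborted. + *
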
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class CreateDirectoryBucketMultipartUpload { + private static final Logger logger = LoggerFactory.getLogger(CreateDirectoryBucketMultipartUpload.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_create_multipartupload.main] + /** + * This method creates a multipart upload request that generates a unique upload + * ID used to track + * all the upload parts. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @return The upload ID used to track the multipart upload + */ + public static String createDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey) { + logger.info("Creating multipart upload for object: {} in bucket: {}", objectKey, bucketName); + + try { + // Create a CreateMultipartUploadRequest + CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + + // Initiate the multipart upload + CreateMultipartUploadResponse response = s3Client.createMultipartUpload(createMultipartUploadRequest); + String uploadId = response.uploadId(); + logger.info("Multipart upload initiated. Upload ID: {}", uploadId); + return uploadId; + + } catch (S3Exception e) { + logger.error("Failed to create multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_create_multipartupload.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "largeObject"; // your-object-key + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Create multipart upload in the directory bucket + String uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey); + logger.info("Upload ID: {}", uploadId); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + // Abort Multipart Uploads and Tear down by deleting the bucket + try { + logger.info("Aborting Multipart Uploads and Deleting the bucket: {}", bucketName); + S3DirectoryBucketUtils.abortDirectoryBucketMultipartUploads(s3Client, bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Error during cleanup: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Unexpected error during cleanup: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucket.java new file mode 100644 index 00000000000..68936a2e117 --- /dev/null +++ 
b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucket.java @@ -0,0 +1,96 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.delete_directory_bucket.import] +import com.example.s3.util.S3DirectoryBucketUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.delete_directory_bucket.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

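+ * DeleteBucket fails if the bucket still contains objects or in-progress multipart uploads, so delete the objects and abort any uploads first. + *
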
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class DeleteDirectoryBucket { + private static final Logger logger = LoggerFactory.getLogger(DeleteDirectoryBucket.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_delete.main] + /** + * Deletes the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket to delete + */ + public static void deleteDirectoryBucket(S3Client s3Client, String bucketName) { + logger.info("Deleting bucket: {}", bucketName); + + try { + // Create a DeleteBucketRequest + DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder() + .bucket(bucketName) + .build(); + + // Delete the bucket + s3Client.deleteBucket(deleteBucketRequest); + logger.info("Successfully deleted bucket: {}", bucketName); + + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_delete.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Delete the directory bucket + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketEncryption.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketEncryption.java new file mode 100644 index 00000000000..0a54ffe9f47 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketEncryption.java @@ -0,0 +1,95 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.delete_directory_bucket_encryption.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.delete_directory_bucket_encryption.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

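+ * Directory buckets encrypt objects with SSE-S3 (AES256) by default, so deleting the bucket encryption configuration resets the bucket to that default rather than disabling encryption. + *
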
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class DeleteDirectoryBucketEncryption { + private static final Logger logger = LoggerFactory.getLogger(DeleteDirectoryBucketEncryption.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_delete_bucket_encryption.main] + /** + * Deletes the encryption configuration from an S3 bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + */ + public static void deleteDirectoryBucketEncryption(S3Client s3Client, String bucketName) { + DeleteBucketEncryptionRequest deleteRequest = DeleteBucketEncryptionRequest.builder() + .bucket(bucketName) + .build(); + + try { + s3Client.deleteBucketEncryption(deleteRequest); + logger.info("Bucket encryption deleted for bucket: {}", bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket encryption: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_delete_bucket_encryption.main] + + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Delete encryption from the directory bucket + deleteDirectoryBucketEncryption(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Delete the bucket + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete the bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObject.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObject.java new file mode 100644 index 00000000000..75f242be022 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObject.java @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_delete_object.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_delete_object.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

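+ * Directory buckets do not support S3 Versioning, so deleting an object permanently removes it. + *
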
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class DeleteDirectoryBucketObject { + private static final Logger logger = LoggerFactory.getLogger(DeleteDirectoryBucketObject.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_delete_object.main] + + /** + * Deletes an object from the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be deleted + */ + public static void deleteDirectoryBucketObject(S3Client s3Client, String bucketName, String objectKey) { + logger.info("Deleting object: {} from bucket: {}", objectKey, bucketName); + + try { + // Create a DeleteObjectRequest + DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + + // Delete the object + s3Client.deleteObject(deleteObjectRequest); + logger.info("Object {} has been deleted", objectKey); + + } catch (S3Exception e) { + logger.error("Failed to delete object: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_delete_object.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "example-object"; // your-object-key + Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put an object in the bucket + putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath); + // Delete the specified object from the directory bucket + deleteDirectoryBucketObject(s3Client, bucketName, objectKey); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } + +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java new file mode 100644 index 00000000000..3ba82b7c374 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java @@ -0,0 +1,132 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_delete_objects.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Delete; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.directory_bucket_delete_objects.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

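+ * The examples in this class rely on the default credentials provider chain
+ * via the shared createS3Client helper. If you need to pin credentials to a
+ * specific profile, you can build the client yourself; the profile name below
+ * is a placeholder.
+ * <pre>{@code
+ * // import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
+ * S3Client s3Client = S3Client.builder()
+ *         .region(Region.US_WEST_2)
+ *         .credentialsProvider(ProfileCredentialsProvider.create("my-profile")) // placeholder
+ *         .build();
+ * }</pre>
+ *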
+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class DeleteDirectoryBucketObjects { + private static final Logger logger = LoggerFactory.getLogger(DeleteDirectoryBucketObjects.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_delete_objects.main] + + /** + * Deletes multiple objects from the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKeys The list of keys (names) of the objects to be deleted + */ + public static void deleteDirectoryBucketObjects(S3Client s3Client, String bucketName, List objectKeys) { + logger.info("Deleting objects from bucket: {}", bucketName); + + try { + // Create a list of ObjectIdentifier + List identifiers = objectKeys.stream() + .map(key -> ObjectIdentifier.builder().key(key).build()) + .toList(); + + // Create a Delete object + Delete delete = Delete.builder() + .objects(identifiers) + .build(); + + // Create a DeleteObjectsRequest + DeleteObjectsRequest deleteObjectsRequest = DeleteObjectsRequest.builder() + .bucket(bucketName) + .delete(delete) + .build(); + + // Delete the objects + DeleteObjectsResponse deleteObjectsResponse = s3Client.deleteObjects(deleteObjectsRequest); + deleteObjectsResponse.deleted().forEach(deleted -> logger.info("Deleted object: {}", deleted.key())); + + } catch (S3Exception e) { + logger.error("Failed to delete objects: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_delete_objects.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + List objectKeys = List.of("example-object-1", "example-object-2"); // your-object-keys + Path filePath1 = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + Path filePath2 = Paths.get("src/main/resources/directoryBucket/sample2.txt"); // path to your file + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put objects in the bucket + putDirectoryBucketObject(s3Client, bucketName, objectKeys.get(0), filePath1); + putDirectoryBucketObject(s3Client, bucketName, objectKeys.get(1), filePath2); + // Delete the specified objects from the directory bucket + deleteDirectoryBucketObjects(s3Client, bucketName, objectKeys); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + // Tear down by deleting the bucket after testing + try { + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } + +} diff --git 
a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketPolicy.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketPolicy.java new file mode 100644 index 00000000000..87e12c5a6dd --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketPolicy.java @@ -0,0 +1,128 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_delete_policy.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_delete_policy.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

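+ * The main method below builds the bucket policy document by string
+ * concatenation. On Java 15 or later, an equivalent and easier-to-read form
+ * is a text block; the account ID and bucket name here are placeholders.
+ * <pre>{@code
+ * String policyText = """
+ *         {
+ *           "Version": "2012-10-17",
+ *           "Statement": [{
+ *             "Sid": "AdminPolicy",
+ *             "Effect": "Allow",
+ *             "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
+ *             "Action": "s3express:*",
+ *             "Resource": "arn:aws:s3express:us-west-2:111122223333:bucket/amzn-s3-demo-bucket--usw2-az1--x-s3"
+ *           }]
+ *         }
+ *         """;
+ * }</pre>
+ *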
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class DeleteDirectoryBucketPolicy { + private static final Logger logger = LoggerFactory.getLogger(DeleteDirectoryBucketPolicy.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_delete_policy.main] + + /** + * Deletes the bucket policy for the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + */ + public static void deleteDirectoryBucketPolicy(S3Client s3Client, String bucketName) { + logger.info("Deleting policy for bucket: {}", bucketName); + + try { + // Create a DeleteBucketPolicyRequest + DeleteBucketPolicyRequest deletePolicyReq = DeleteBucketPolicyRequest.builder() + .bucket(bucketName) + .build(); + + // Delete the bucket policy + s3Client.deleteBucketPolicy(deletePolicyReq); + logger.info("Successfully deleted bucket policy"); + + } catch (S3Exception e) { + logger.error("Failed to delete bucket policy: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_delete_policy.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + // Get AWS account ID + String awsAccountId = getAwsAccountId(); + + // Policy text + String policyText = "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"AdminPolicy\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\n" + + " \"AWS\": \"arn:aws:iam::" + awsAccountId + ":root\"\n" + + " },\n" + + " \"Action\": \"s3express:*\",\n" + + " \"Resource\": \"arn:aws:s3express:us-west-2:" + awsAccountId + ":bucket/" + bucketName + + "\"\n" + + " }\n" + + " ]\n" + + "}"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put Bucket Policy + putDirectoryBucketPolicy(s3Client, bucketName, policyText); + // Delete Bucket Policy + deleteDirectoryBucketPolicy(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Tear down by deleting the bucket + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } + +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GeneratePresignedGetURLForDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GeneratePresignedGetURLForDirectoryBucket.java new file mode 100644 index 00000000000..37cfb14bc57 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GeneratePresignedGetURLForDirectoryBucket.java @@ -0,0 +1,137 @@ +// Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_generate_presigned_url.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_generate_presigned_url.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

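+ * A URL returned by this example can be fetched with any HTTP client until it
+ * expires, for instance with java.net.http (Java 11+); presignedURL below is
+ * the string returned by the method in this class.
+ * <pre>{@code
+ * // import java.net.URI;
+ * // import java.net.http.HttpClient;
+ * // import java.net.http.HttpRequest;
+ * // import java.net.http.HttpResponse;
+ * // Note: send() throws IOException and InterruptedException
+ * HttpResponse<String> response = HttpClient.newHttpClient().send(
+ *         HttpRequest.newBuilder(URI.create(presignedURL)).GET().build(),
+ *         HttpResponse.BodyHandlers.ofString());
+ * System.out.println("HTTP status: " + response.statusCode());
+ * }</pre>
+ *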
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class GeneratePresignedGetURLForDirectoryBucket { + private static final Logger logger = LoggerFactory.getLogger(GeneratePresignedGetURLForDirectoryBucket.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_generate_presigned_url.main] + + /** + * Generates a presigned URL for accessing an object in the specified S3 + * directory bucket. + * + * @param s3Presigner The S3 presigner client used to generate the presigned URL + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to access + * @return A presigned URL for accessing the specified object + */ + public static String generatePresignedGetURLForDirectoryBucket(S3Presigner s3Presigner, String bucketName, + String objectKey) { + logger.info("Generating presigned URL for object: {} in bucket: {}", objectKey, bucketName); + + try { + // Create a GetObjectRequest + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + + // Create a GetObjectPresignRequest + GetObjectPresignRequest getObjectPresignRequest = GetObjectPresignRequest.builder() + .signatureDuration(Duration.ofMinutes(10)) // Presigned URL valid for 10 minutes + .getObjectRequest(getObjectRequest) + .build(); + + // Generate the presigned URL + PresignedGetObjectRequest presignedGetObjectRequest = s3Presigner.presignGetObject(getObjectPresignRequest); + + // Get the presigned URL + String presignedURL = presignedGetObjectRequest.url().toString(); + logger.info("Presigned URL: {}", presignedURL); + return presignedURL; + + } catch (S3Exception e) { + logger.error("Failed to generate presigned URL: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_generate_presigned_url.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "example-object-2"; // your-object-key + S3Presigner s3Presigner = createS3Presigner(region); + Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put an object in the bucket + putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath); + // Generate a presigned GET URL for the directory bucket + generatePresignedGetURLForDirectoryBucket(s3Presigner, bucketName, objectKey); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Delete all objects in the bucket + logger.info("Deleting the objects in bucket: {}", bucketName); + deleteAllObjectsInDirectoryBucket(s3Client, bucketName); + // Tear down by deleting the bucket after testing + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete 
bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to clean up the bucket: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } + +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketEncryption.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketEncryption.java new file mode 100644 index 00000000000..383ef41b0c3 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketEncryption.java @@ -0,0 +1,112 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_get_encryption.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest; +import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse; +import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_get_encryption.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

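+ * New directory buckets use SSE-S3 (AES256) by default, which is what this
+ * example typically prints. If you want the bucket to default to SSE-KMS
+ * instead, a configuration can be applied roughly as sketched below; the KMS
+ * key ARN is a placeholder.
+ * <pre>{@code
+ * s3Client.putBucketEncryption(PutBucketEncryptionRequest.builder()
+ *         .bucket(bucketName)
+ *         .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
+ *                 .rules(ServerSideEncryptionRule.builder()
+ *                         .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
+ *                                 .sseAlgorithm(ServerSideEncryption.AWS_KMS)
+ *                                 .kmsMasterKeyID("arn:aws:kms:us-west-2:111122223333:key/key-id") // placeholder
+ *                                 .build())
+ *                         .bucketKeyEnabled(true)
+ *                         .build())
+ *                 .build())
+ *         .build());
+ * }</pre>
+ *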
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class GetDirectoryBucketEncryption { + private static final Logger logger = LoggerFactory.getLogger(GetDirectoryBucketEncryption.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_get_encryption.main] + /** + * Retrieves the encryption configuration for an S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @return The type of server-side encryption applied to the bucket (e.g., + * AES256, aws:kms) + */ + public static String getDirectoryBucketEncryption(S3Client s3Client, String bucketName) { + try { + // Create a GetBucketEncryptionRequest + GetBucketEncryptionRequest getRequest = GetBucketEncryptionRequest.builder() + .bucket(bucketName) + .build(); + + // Retrieve the bucket encryption configuration + GetBucketEncryptionResponse response = s3Client.getBucketEncryption(getRequest); + ServerSideEncryptionRule rule = response.serverSideEncryptionConfiguration().rules().get(0); + + String encryptionType = rule.applyServerSideEncryptionByDefault().sseAlgorithmAsString(); + logger.info("Bucket encryption algorithm: {}", encryptionType); + logger.info("KMS Customer Managed Key ID: {}", rule.applyServerSideEncryptionByDefault().kmsMasterKeyID()); + logger.info("Bucket Key Enabled: {}", rule.bucketKeyEnabled()); + + return encryptionType; + } catch (S3Exception e) { + logger.error("Failed to get bucket encryption: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_get_encryption.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Get the encryption settings of the directory bucket + String encryptionType = getDirectoryBucketEncryption(s3Client, bucketName); + logger.info("Retrieved encryption type: {}", encryptionType); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObject.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObject.java new file mode 100644 index 00000000000..9edd13dd563 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObject.java @@ -0,0 +1,129 @@ +// Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_get_object.import] +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_get_object.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

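+ * The example below buffers the entire object in memory with getObjectAsBytes,
+ * which is convenient for small objects. For larger objects you can stream the
+ * body straight to disk instead; the destination path is a placeholder.
+ * <pre>{@code
+ * s3Client.getObject(GetObjectRequest.builder()
+ *         .bucket(bucketName)
+ *         .key(objectKey)
+ *         .build(),
+ *         Paths.get("downloaded-object.txt")); // placeholder destination
+ * }</pre>
+ *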
+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class GetDirectoryBucketObject {
+    private static final Logger logger = LoggerFactory.getLogger(GetDirectoryBucketObject.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_get_object.main]
+    /**
+     * Retrieves an object from the specified S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @param objectKey  The key (name) of the object to be retrieved
+     * @return True if the object was retrieved successfully, false otherwise
+     */
+    public static boolean getDirectoryBucketObject(S3Client s3Client, String bucketName, String objectKey) {
+        logger.info("Retrieving object: {} from bucket: {}", objectKey, bucketName);
+
+        try {
+            // Create a GetObjectRequest
+            GetObjectRequest objectRequest = GetObjectRequest.builder()
+                    .key(objectKey)
+                    .bucket(bucketName)
+                    .build();
+
+            // Retrieve the object as bytes
+            ResponseBytes<GetObjectResponse> objectBytes = s3Client.getObjectAsBytes(objectRequest);
+            byte[] data = objectBytes.asByteArray();
+
+            // Print object contents to console
+            String objectContent = new String(data, StandardCharsets.UTF_8);
+            logger.info("Object contents: \n{}", objectContent);
+
+            return true;
+
+        } catch (S3Exception e) {
+            logger.error("Failed to retrieve object: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            return false;
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_get_object.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "example-object-2"; // your-object-key
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Put an object in the bucket
+            putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath);
+            // Get the specified object from the directory bucket
+            boolean objectRetrieved = getDirectoryBucketObject(s3Client, bucketName, objectKey);
+            if (objectRetrieved) {
+                logger.info("Object retrieved successfully.");
+            } else {
+                logger.error("Failed to retrieve the object.");
+            }
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            try {
+                logger.info("Deleting the objects in bucket: {}", bucketName);
+                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
+                logger.info("Attempting to delete bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObjectAttributes.java
b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObjectAttributes.java new file mode 100644 index 00000000000..2d451cbe6a8 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketObjectAttributes.java @@ -0,0 +1,131 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_get_object_attributes.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.ObjectAttributes; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_get_object_attributes.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

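+ * Besides ETag, storage class, and size, GetObjectAttributes can report part
+ * information for objects that were uploaded via multipart upload; a sketch:
+ * <pre>{@code
+ * GetObjectAttributesResponse attrs = s3Client.getObjectAttributes(
+ *         GetObjectAttributesRequest.builder()
+ *                 .bucket(bucketName)
+ *                 .key(objectKey)
+ *                 .objectAttributes(ObjectAttributes.OBJECT_PARTS)
+ *                 .build());
+ * if (attrs.objectParts() != null) {
+ *     logger.info("Total parts: {}", attrs.objectParts().totalPartsCount());
+ * }
+ * }</pre>
+ *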
+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class GetDirectoryBucketObjectAttributes { + private static final Logger logger = LoggerFactory.getLogger(GetDirectoryBucketObjectAttributes.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_get_object_attributes.main] + /** + * Retrieves attributes for an object in the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to retrieve attributes for + * @return True if the object attributes are successfully retrieved, false + * otherwise + */ + public static boolean getDirectoryBucketObjectAttributes(S3Client s3Client, String bucketName, String objectKey) { + logger.info("Retrieving attributes for object: {} from bucket: {}", objectKey, bucketName); + + try { + // Create a GetObjectAttributesRequest + GetObjectAttributesRequest getObjectAttributesRequest = GetObjectAttributesRequest.builder() + .bucket(bucketName) + .key(objectKey) + .objectAttributes(ObjectAttributes.E_TAG, ObjectAttributes.STORAGE_CLASS, + ObjectAttributes.OBJECT_SIZE) + .build(); + + // Retrieve the object attributes + GetObjectAttributesResponse response = s3Client.getObjectAttributes(getObjectAttributesRequest); + logger.info("Attributes for object {}:", objectKey); + logger.info("ETag: {}", response.eTag()); + logger.info("Storage Class: {}", response.storageClass()); + logger.info("Object Size: {}", response.objectSize()); + return true; + + } catch (S3Exception e) { + logger.error("Failed to retrieve object attributes: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + return false; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_get_object_attributes.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "example-object-2"; // your-object-key + Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put an object in the bucket + putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath); + // Get the attributes of the specified object from the directory bucket + boolean attributesRetrieved = getDirectoryBucketObjectAttributes(s3Client, bucketName, objectKey); + if (attributesRetrieved) { + logger.info("Object attributes retrieved successfully."); + } else { + logger.error("Failed to retrieve object attributes."); + } + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Delete all objects in the bucket + logger.info("Deleting the objects in bucket: {}", bucketName); + deleteAllObjectsInDirectoryBucket(s3Client, bucketName); + // Tear down by deleting the bucket after testing + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } 
catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketPolicy.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketPolicy.java new file mode 100644 index 00000000000..5649fc081ec --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/GetDirectoryBucketPolicy.java @@ -0,0 +1,130 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.get_directory_bucket_policy.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.get_directory_bucket_policy.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class GetDirectoryBucketPolicy { + private static final Logger logger = LoggerFactory.getLogger(GetDirectoryBucketPolicy.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_get_policy.main] + /** + * Retrieves the bucket policy for the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @return The bucket policy text + */ + public static String getDirectoryBucketPolicy(S3Client s3Client, String bucketName) { + logger.info("Getting policy for bucket: {}", bucketName); + + try { + // Create a GetBucketPolicyRequest + GetBucketPolicyRequest policyReq = GetBucketPolicyRequest.builder() + .bucket(bucketName) + .build(); + + // Retrieve the bucket policy + GetBucketPolicyResponse response = s3Client.getBucketPolicy(policyReq); + + // Print and return the policy text + String policyText = response.policy(); + logger.info("Bucket policy: {}", policyText); + return policyText; + + } catch (S3Exception e) { + logger.error("Failed to get bucket policy: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_get_policy.main] + + // Main method for testing + public static void main(String[] args) { + S3Client s3Client = createS3Client(Region.US_WEST_2); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + // Get AWS account ID + String awsAccountId = getAwsAccountId(); + + // Policy text + String policyText = "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"AdminPolicy\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\n" + + " \"AWS\": \"arn:aws:iam::" + awsAccountId + ":root\"\n" + + " },\n" + + " \"Action\": \"s3express:*\",\n" + + " \"Resource\": \"arn:aws:s3express:us-west-2:" + awsAccountId + ":bucket/" + bucketName + + "\"\n" + + " }\n" + + " ]\n" + + "}"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put Bucket Policy + putDirectoryBucketPolicy(s3Client, bucketName, policyText); + // Get Bucket Policy + String policy = getDirectoryBucketPolicy(s3Client, bucketName); + logger.info("Retrieved policy: {}", policy); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + // Tear down by deleting the bucket + try { + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucket.java new file mode 100644 index 00000000000..ce3d0201664 --- /dev/null +++ 
b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucket.java @@ -0,0 +1,107 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_head_bucket.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_head_bucket.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

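+ * HeadBucket returns no body, so a missing bucket surfaces as an S3Exception.
+ * If you need to distinguish a bucket that does not exist (HTTP 404) from one
+ * you cannot access (HTTP 403), inspect the status code; a sketch:
+ * <pre>{@code
+ * try {
+ *     s3Client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build());
+ * } catch (S3Exception e) {
+ *     if (e.statusCode() == 404) {
+ *         // the bucket does not exist
+ *     }
+ * }
+ * }</pre>
+ *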
+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class HeadDirectoryBucket {
+    private static final Logger logger = LoggerFactory.getLogger(HeadDirectoryBucket.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_head_bucket.main]
+    /**
+     * Checks whether the specified S3 directory bucket exists and is accessible.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket to check
+     * @return True if the bucket exists and is accessible, false otherwise
+     */
+    public static boolean headDirectoryBucket(S3Client s3Client, String bucketName) {
+        logger.info("Checking if bucket exists: {}", bucketName);
+
+        try {
+            // Create a HeadBucketRequest
+            HeadBucketRequest headBucketRequest = HeadBucketRequest.builder()
+                    .bucket(bucketName)
+                    .build();
+
+            // Retrieve the bucket metadata
+            HeadBucketResponse response = s3Client.headBucket(headBucketRequest);
+            logger.info("Amazon S3 directory bucket: \"{}\" found.", bucketName);
+            return true;
+
+        } catch (S3Exception e) {
+            // Return false instead of rethrowing so the result matches the
+            // documented contract of this method
+            logger.error("Failed to access bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            return false;
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_head_bucket.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Check if the bucket exists
+            boolean exists = headDirectoryBucket(s3Client, bucketName);
+            logger.info("Bucket exists: {}", exists);
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            // Tear down by deleting the bucket
+            try {
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucketObject.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucketObject.java
new file mode 100644
index 00000000000..a0fbc91ce07
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HeadDirectoryBucketObject.java
@@ -0,0 +1,130 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_head_object.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_head_object.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class HeadDirectoryBucketObject {
+    private static final Logger logger = LoggerFactory.getLogger(HeadDirectoryBucketObject.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_head_object.main]
+    /**
+     * Retrieves metadata for an object in the specified S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @param objectKey  The key (name) of the object to retrieve metadata for
+     * @return True if the object exists, false otherwise
+     */
+    public static boolean headDirectoryBucketObject(S3Client s3Client, String bucketName, String objectKey) {
+        logger.info("Retrieving metadata for object: {} from bucket: {}", objectKey, bucketName);
+
+        try {
+            // Create a HeadObjectRequest
+            HeadObjectRequest headObjectRequest = HeadObjectRequest.builder()
+                    .bucket(bucketName)
+                    .key(objectKey)
+                    .build();
+
+            // Retrieve the object metadata
+            HeadObjectResponse response = s3Client.headObject(headObjectRequest);
+            logger.info("Amazon S3 object: \"{}\" found in bucket: \"{}\" with ETag: \"{}\"", objectKey, bucketName,
+                    response.eTag());
+            logger.info("Content-Type: {}", response.contentType());
+            logger.info("Content-Length: {}", response.contentLength());
+            logger.info("Last Modified: {}", response.lastModified());
+            return true;
+
+        } catch (S3Exception e) {
+            logger.error("Failed to retrieve object metadata: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            return false;
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_head_object.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "example-object-2"; // your-object-key
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Put an object in the bucket
+            putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath);
+            // Check the object metadata in the directory bucket
+            boolean objectExists = headDirectoryBucketObject(s3Client, bucketName, objectKey);
+            if (objectExists) {
+                logger.info("Object metadata retrieved successfully.");
+            } else {
+                logger.error("Failed to retrieve object metadata.");
+            }
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            try {
+                // Delete all objects in the bucket
+                logger.info("Deleting the objects in bucket: {}", bucketName);
+                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
+                // Tear down by deleting the bucket after testing
+                logger.info("Attempting to delete bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete bucket: {} - Error code: {}",
e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HelloS3DirectoryBuckets.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HelloS3DirectoryBuckets.java new file mode 100644 index 00000000000..625b939fafb --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/HelloS3DirectoryBuckets.java @@ -0,0 +1,163 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[s3directorybuckets.java2.directory_bucket_hello.main] + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_hello.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +import java.util.List; +import java.util.stream.Collectors; +// snippet-end:[s3directorybuckets.java2.directory_bucket_hello.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

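+ * ListDirectoryBuckets is paginated. The helper in this class fetches a single
+ * page; if your account has many directory buckets, loop on the continuation
+ * token, roughly as sketched here:
+ * <pre>{@code
+ * String token = null;
+ * do {
+ *     ListDirectoryBucketsResponse page = s3Client.listDirectoryBuckets(
+ *             ListDirectoryBucketsRequest.builder().continuationToken(token).build());
+ *     page.buckets().forEach(b -> logger.info("Bucket Name: {}", b.name()));
+ *     token = page.continuationToken();
+ * } while (token != null);
+ * }</pre>
+ *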
+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class HelloS3DirectoryBuckets {
+    private static final Logger logger = LoggerFactory.getLogger(HelloS3DirectoryBuckets.class);
+
+    public static void main(String[] args) {
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3";
+        Region region = Region.US_WEST_2;
+        String zone = "usw2-az1";
+        S3Client s3Client = createS3Client(region);
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            logger.info("Created bucket: {}", bucketName);
+
+            // List all directory buckets
+            List<String> bucketNames = listDirectoryBuckets(s3Client);
+            bucketNames.forEach(name -> logger.info("Bucket Name: {}", name));
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            try {
+                // Delete the created bucket
+                deleteDirectoryBucket(s3Client, bucketName);
+                logger.info("Deleted bucket: {}", bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete the bucket due to S3 error: {} - Error code: {}",
+                        e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+
+    /**
+     * Creates a new S3 directory bucket in the specified Zone (an Availability
+     * Zone in this example).
+     *
+     * @param s3Client   The S3 client used to create the bucket
+     * @param bucketName The name of the bucket to be created
+     * @param zone       The ID of the Availability Zone (for example, usw2-az1)
+     *                   in which the bucket will be created
+     * @throws S3Exception if there's an error creating the bucket
+     */
+    public static void createDirectoryBucket(S3Client s3Client, String bucketName, String zone) throws S3Exception {
+        logger.info("Creating bucket: {}", bucketName);
+
+        CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder()
+                .location(LocationInfo.builder()
+                        .type(LocationType.AVAILABILITY_ZONE)
+                        .name(zone).build())
+                .bucket(BucketInfo.builder()
+                        .type(BucketType.DIRECTORY)
+                        .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE)
+                        .build())
+                .build();
+        try {
+            CreateBucketRequest bucketRequest = CreateBucketRequest.builder()
+                    .bucket(bucketName)
+                    .createBucketConfiguration(bucketConfiguration).build();
+            CreateBucketResponse response = s3Client.createBucket(bucketRequest);
+            logger.info("Bucket created successfully with location: {}", response.location());
+        } catch (S3Exception e) {
+            logger.error("Error creating bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            throw e;
+        }
+    }
+
+    /**
+     * Lists all S3 directory buckets.
+ * + * @param s3Client The S3 client used to interact with S3 + * @return A list of bucket names + */ + public static List listDirectoryBuckets(S3Client s3Client) { + logger.info("Listing all directory buckets"); + + try { + // Create a ListBucketsRequest + ListDirectoryBucketsRequest listBucketsRequest = ListDirectoryBucketsRequest.builder().build(); + + // Retrieve the list of buckets + ListDirectoryBucketsResponse response = s3Client.listDirectoryBuckets(listBucketsRequest); + + // Extract bucket names + List bucketNames = response.buckets().stream() + .map(Bucket::name) + .collect(Collectors.toList()); + + return bucketNames; + } catch (S3Exception e) { + logger.error("Failed to list buckets: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Deletes the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the bucket to delete + */ + public static void deleteDirectoryBucket(S3Client s3Client, String bucketName) { + try { + DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder() + .bucket(bucketName) + .build(); + s3Client.deleteBucket(deleteBucketRequest); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: " + bucketName + " - Error code: " + e.awsErrorDetails().errorCode(), + e); + throw e; + } + } + +} +// snippet-end:[s3directorybuckets.java2.directory_bucket_hello.main] \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketMultipartUpload.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketMultipartUpload.java new file mode 100644 index 00000000000..f567fc29626 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketMultipartUpload.java @@ -0,0 +1,128 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_list_multipart_upload.import] + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_list_multipart_upload.import] + +/** + * Before running this example: + *

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *
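+ * For instance, the examples in this directory rely on the default credentials
+ * provider chain; a sketch that makes this explicit (equivalent to the default
+ * behavior, and requiring the
+ * software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider import) is:
+ *
+ *   S3Client s3Client = S3Client.builder()
+ *           .region(Region.US_WEST_2)
+ *           .credentialsProvider(DefaultCredentialsProvider.create())
+ *           .build();
+ *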

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class ListDirectoryBucketMultipartUpload {
+    private static final Logger logger = LoggerFactory.getLogger(ListDirectoryBucketMultipartUpload.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_list_multipart_upload.main]
+
+    /**
+     * Lists multipart uploads for the specified S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @return A list of MultipartUpload objects representing the multipart uploads
+     */
+    public static List<MultipartUpload> listDirectoryBucketMultipartUploads(S3Client s3Client, String bucketName) {
+        logger.info("Listing in-progress multipart uploads for bucket: {}", bucketName);
+
+        try {
+            // Create a ListMultipartUploadsRequest
+            ListMultipartUploadsRequest listMultipartUploadsRequest = ListMultipartUploadsRequest.builder()
+                    .bucket(bucketName)
+                    .build();
+
+            // List the multipart uploads
+            ListMultipartUploadsResponse response = s3Client.listMultipartUploads(listMultipartUploadsRequest);
+            List<MultipartUpload> uploads = response.uploads();
+            for (MultipartUpload upload : uploads) {
+                logger.info("In-progress multipart upload: Upload ID: {}, Key: {}, Initiated: {}", upload.uploadId(),
+                        upload.key(), upload.initiated());
+            }
+            return uploads;
+
+        } catch (S3Exception e) {
+            logger.error("Failed to list multipart uploads: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            return List.of(); // Return an empty list if an exception is thrown
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_list_multipart_upload.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "largeObject"; // your-object-key
+        String uploadId; // your-upload-id
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg"); // path to your file
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Create a multipart upload
+            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);
+            // Perform multipart upload for the directory bucket
+            List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey,
+                    uploadId, filePath);
+            // List multipart uploads in the directory bucket
+            List<MultipartUpload> uploads = listDirectoryBucketMultipartUploads(s3Client, bucketName);
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } catch (IOException e) {
+            logger.error("An I/O error occurred: {}", e.getMessage());
+        } finally {
+            try {
+                logger.info("Aborting Multipart Uploads in bucket: {}", bucketName);
+                abortDirectoryBucketMultipartUploads(s3Client, bucketName);
+
+                logger.info("Deleting the bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to clean up S3 resources: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            }
catch (Exception e) { + logger.error("Failed to clean up resources due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketObjectsV2.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketObjectsV2.java new file mode 100644 index 00000000000..ae3e22b89b6 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketObjectsV2.java @@ -0,0 +1,123 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_list_objects_v2.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.S3Object; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.stream.Collectors; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_list_objects_v2.import] + +/** + * Before running this example: + *
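+ * (Orientation, not a prerequisite: the core call in this example is the
+ * listDirectoryBucketObjectsV2 method defined below. A minimal sketch of its
+ * use, with a hypothetical existing directory bucket name:
+ *
+ *   S3Client s3Client = S3Client.builder().region(Region.US_WEST_2).build();
+ *   List<String> keys = listDirectoryBucketObjectsV2(s3Client,
+ *           "amzn-s3-demo-bucket--usw2-az1--x-s3");
+ *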

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class ListDirectoryBucketObjectsV2 {
+    private static final Logger logger = LoggerFactory.getLogger(ListDirectoryBucketObjectsV2.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_list_objects_v2.main]
+    /**
+     * Lists objects in the specified S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @return A list of object keys in the bucket
+     */
+    public static List<String> listDirectoryBucketObjectsV2(S3Client s3Client, String bucketName) {
+        logger.info("Listing objects in bucket: {}", bucketName);
+
+        try {
+            // Create a ListObjectsV2Request
+            ListObjectsV2Request listObjectsV2Request = ListObjectsV2Request.builder()
+                    .bucket(bucketName)
+                    .build();
+
+            // Retrieve the list of objects
+            ListObjectsV2Response response = s3Client.listObjectsV2(listObjectsV2Request);
+
+            // Extract and return the object keys
+            return response.contents().stream()
+                    .map(S3Object::key)
+                    .collect(Collectors.toList());
+
+        } catch (S3Exception e) {
+            logger.error("Failed to list objects: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            throw e;
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_list_objects_v2.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "example-object"; // your-object-key
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Put an object in the bucket
+            putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath);
+            // List objects in the directory bucket using ListObjectsV2
+            List<String> objectKeys = listDirectoryBucketObjectsV2(s3Client, bucketName);
+            objectKeys.forEach(key -> logger.info("Object Key: {}", key));
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            try {
+                logger.info("Deleting the objects in bucket: {}", bucketName);
+                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
+
+                logger.info("Attempting to delete bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketParts.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketParts.java
new file mode 100644
index 00000000000..4d67c80944c
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBucketParts.java
@@ -0,0 +1,130 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3.directorybucket;
+
+// snippet-start:[s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.import]
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+import static com.example.s3.util.S3DirectoryBucketUtils.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+// snippet-end:[s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.import]
+
+/**
+ * Before running this example:
+ *
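+ * (Orientation, not a prerequisite: the core call in this example is the
+ * listDirectoryBucketMultipartUploadParts method defined below. A minimal
+ * sketch, where the bucket name, object key, and upload ID are hypothetical
+ * values from an earlier CreateMultipartUpload call:
+ *
+ *   List<Part> parts = listDirectoryBucketMultipartUploadParts(
+ *           s3Client, "amzn-s3-demo-bucket--usw2-az1--x-s3", "largeObject", uploadId);
+ *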

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class ListDirectoryBucketParts {
+    private static final Logger logger = LoggerFactory.getLogger(ListDirectoryBucketParts.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.main]
+    /**
+     * Lists the parts of a multipart upload for the specified S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @param objectKey  The key (name) of the object being uploaded
+     * @param uploadId   The upload ID used to track the multipart upload
+     * @return A list of Part objects representing the parts of the multipart upload
+     */
+    public static List<Part> listDirectoryBucketMultipartUploadParts(S3Client s3Client, String bucketName,
+            String objectKey, String uploadId) {
+        logger.info("Listing parts for object: {} in bucket: {}", objectKey, bucketName);
+
+        try {
+            // Create a ListPartsRequest
+            ListPartsRequest listPartsRequest = ListPartsRequest.builder()
+                    .bucket(bucketName)
+                    .uploadId(uploadId)
+                    .key(objectKey)
+                    .build();
+
+            // List the parts of the multipart upload
+            ListPartsResponse response = s3Client.listParts(listPartsRequest);
+            List<Part> parts = response.parts();
+            for (Part part : parts) {
+                logger.info("Uploaded part: Part number = \"{}\", etag = {}", part.partNumber(), part.eTag());
+            }
+            return parts;
+
+        } catch (S3Exception e) {
+            logger.error("Failed to list parts: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            return List.of(); // Return an empty list if an exception is thrown
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_list_multipart_upload_parts.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "largeObject"; // your-object-key
+        String uploadId; // your-upload-id
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg"); // path to your file
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Create a multipart upload
+            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);
+            // Perform multipart upload for the directory bucket
+            List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey,
+                    uploadId, filePath);
+            // List parts of the multipart upload in the directory bucket
+            List<Part> parts = listDirectoryBucketMultipartUploadParts(s3Client, bucketName, objectKey, uploadId);
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } catch (IOException e) {
+            logger.error("An I/O error occurred: {}", e.getMessage());
+        } finally {
+            try {
+                logger.info("Aborting Multipart Uploads in bucket: {}", bucketName);
+                abortDirectoryBucketMultipartUploads(s3Client, bucketName);
+
+                logger.info("Deleting the bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to clean up S3 resources: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to clean up resources due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBuckets.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBuckets.java
new file mode 100644
index 00000000000..a8505c2371c
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/ListDirectoryBuckets.java
@@ -0,0 +1,110 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3.directorybucket;
+
+// snippet-start:[s3directorybuckets.java2.directory_bucket_list_buckets.import]
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
+
+import static com.example.s3.util.S3DirectoryBucketUtils.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.stream.Collectors;
+// snippet-end:[s3directorybuckets.java2.directory_bucket_list_buckets.import]
+
+/**
+ * Before running this example:
+ *
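+ * (Orientation, not a prerequisite: the core call in this example is the
+ * listDirectoryBuckets method defined below, which takes only a client:
+ *
+ *   S3Client s3Client = S3Client.builder().region(Region.US_WEST_2).build();
+ *   List<String> bucketNames = listDirectoryBuckets(s3Client);
+ *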

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html + * in the AWS SDKs and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html + * in the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method + * to enable directory bucket traffic without requiring an internet gateway or + * NAT device. For more information + * on configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class ListDirectoryBuckets {
+    private static final Logger logger = LoggerFactory.getLogger(ListDirectoryBuckets.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_list_buckets.main]
+    /**
+     * Lists all S3 directory buckets.
+     *
+     * @param s3Client The S3 client used to interact with S3
+     * @return A list of bucket names
+     */
+    public static List<String> listDirectoryBuckets(S3Client s3Client) {
+        logger.info("Listing all directory buckets");
+
+        try {
+            // Create a ListDirectoryBucketsRequest
+            ListDirectoryBucketsRequest listDirectoryBucketsRequest = ListDirectoryBucketsRequest.builder().build();
+
+            // Retrieve the list of buckets
+            ListDirectoryBucketsResponse response = s3Client.listDirectoryBuckets(listDirectoryBucketsRequest);
+
+            // Extract bucket names
+            List<String> bucketNames = response.buckets().stream()
+                    .map(Bucket::name)
+                    .collect(Collectors.toList());
+
+            return bucketNames;
+        } catch (S3Exception e) {
+            logger.error("Failed to list buckets: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            throw e;
+        }
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_list_buckets.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // List all directory buckets
+            List<String> bucketNames = listDirectoryBuckets(s3Client);
+            bucketNames.forEach(name -> logger.info("Bucket Name: {}", name));
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } finally {
+            try {
+                // Tear down by deleting the bucket
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketEncryption.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketEncryption.java
new file mode 100644
index 00000000000..418db14e732
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketEncryption.java
@@ -0,0 +1,142 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_put_encryption.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_put_encryption.import] + +/** + * Before running this example: + *
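+ * (Orientation, not a prerequisite: the core call in this example is the
+ * putDirectoryBucketEncryption method defined below. A minimal sketch, where
+ * the bucket name and the customer-managed KMS key ID are hypothetical:
+ *
+ *   putDirectoryBucketEncryption(s3Client,
+ *           "amzn-s3-demo-bucket--usw2-az1--x-s3", kmsKeyId);
+ *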

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class PutDirectoryBucketEncryption { + private static final Logger logger = LoggerFactory.getLogger(PutDirectoryBucketEncryption.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_put_encryption.main] + /** + * Sets the default encryption configuration for an S3 bucket as SSE-KMS. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param kmsKeyId The ID of the customer-managed KMS key + */ + public static void putDirectoryBucketEncryption(S3Client s3Client, String bucketName, String kmsKeyId) { + // Define the default encryption configuration to use SSE-KMS. For directory + // buckets, AWS managed KMS keys aren't supported. Only customer-managed keys + // are supported. + ServerSideEncryptionByDefault encryptionByDefault = ServerSideEncryptionByDefault.builder() + .sseAlgorithm(ServerSideEncryption.AWS_KMS) + .kmsMasterKeyID(kmsKeyId) + .build(); + + // Create a server-side encryption rule to apply the default encryption + // configuration. For directory buckets, the bucketKeyEnabled field is enforced + // to be true. + ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder() + .bucketKeyEnabled(true) + .applyServerSideEncryptionByDefault(encryptionByDefault) + .build(); + + // Create the server-side encryption configuration for the bucket + ServerSideEncryptionConfiguration encryptionConfiguration = ServerSideEncryptionConfiguration.builder() + .rules(rule) + .build(); + + // Create the PutBucketEncryption request + PutBucketEncryptionRequest putRequest = PutBucketEncryptionRequest.builder() + .bucket(bucketName) + .serverSideEncryptionConfiguration(encryptionConfiguration) + .build(); + + // Set the bucket encryption + try { + s3Client.putBucketEncryption(putRequest); + logger.info("SSE-KMS Bucket encryption configuration set for the directory bucket: {}", bucketName); + } catch (S3Exception e) { + logger.error("Failed to set bucket encryption: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_put_encryption.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + S3Client s3Client = S3Client.builder().region(region).build(); + KmsClient kmsClient = createKmsClient(region); + int waitingPeriodInDays = 7; // Set deletion window between 7 and 30 days + String kmsKeyId = null; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + + // Create a new KMS key + kmsKeyId = createKmsKey(kmsClient); + // Set bucket encryption using the KMS key + putDirectoryBucketEncryption(s3Client, bucketName, kmsKeyId); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Tear down by deleting the bucket + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", 
e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } + + // Schedule key deletion if it was created + if (kmsKeyId != null) { + try { + String deletionDate = scheduleKeyDeletion(kmsClient, kmsKeyId, waitingPeriodInDays); + logger.info("Key scheduled for deletion on: {}", deletionDate); + } catch (S3Exception e) { + logger.error("Failed to schedule key deletion: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to schedule key deletion due to unexpected error: {}", e.getMessage()); + } + } + s3Client.close(); + kmsClient.close(); + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketObject.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketObject.java new file mode 100644 index 00000000000..9acde460445 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketObject.java @@ -0,0 +1,118 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_put_object.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_put_object.import] + +/** + * Before running this example: + *
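+ * (Orientation, not a prerequisite: the core call in this example is the
+ * putDirectoryBucketObject method defined below. A minimal sketch, with a
+ * hypothetical bucket name and local file path:
+ *
+ *   putDirectoryBucketObject(s3Client, "amzn-s3-demo-bucket--usw2-az1--x-s3",
+ *           "example-object", Paths.get("sample1.txt"));
+ *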

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class PutDirectoryBucketObject { + private static final Logger logger = LoggerFactory.getLogger(PutDirectoryBucketObject.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_put_object.main] + /** + * Puts an object into the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be placed in the bucket + * @param filePath The path of the file to be uploaded + */ + public static void putDirectoryBucketObject(S3Client s3Client, String bucketName, String objectKey, Path filePath) { + logger.info("Putting object: {} into bucket: {}", objectKey, bucketName); + + try { + // Create a PutObjectRequest + PutObjectRequest putObj = PutObjectRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + + // Upload the object + s3Client.putObject(putObj, filePath); + logger.info("Successfully placed {} into bucket {}", objectKey, bucketName); + + } catch (UncheckedIOException e) { + throw S3Exception.builder().message("Failed to read the file: " + e.getMessage()).cause(e).build(); + } catch (S3Exception e) { + logger.error("Failed to put object: {}", e.getMessage()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_put_object.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + String objectKey = "example-object"; // your-object-key + Path filePath = Paths.get("src/main/resources/directoryBucket/sample1.txt"); // path to your file + + try { + // Create the bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put an object in the bucket + putDirectoryBucketObject(s3Client, bucketName, objectKey, filePath); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Delete all objects in the bucket + logger.info("Deleting the objects in bucket: {}", bucketName); + deleteAllObjectsInDirectoryBucket(s3Client, bucketName); + + // Tear down by deleting the bucket after testing + logger.info("Attempting to delete bucket: {}", bucketName); + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketPolicy.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketPolicy.java new file mode 100644 index 00000000000..2432964963d --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/PutDirectoryBucketPolicy.java @@ -0,0 +1,124 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.put_directory_bucket_policy.import] +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.put_directory_bucket_policy.import] + +/** + * Before running this example: + *
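+ * (Orientation, not a prerequisite: the core call in this example is the
+ * putDirectoryBucketPolicy method defined below. A minimal sketch, where the
+ * bucket name and the policy JSON string are hypothetical:
+ *
+ *   putDirectoryBucketPolicy(s3Client,
+ *           "amzn-s3-demo-bucket--usw2-az1--x-s3", policyJson);
+ *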

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For + * details on Regions and Zones supporting directory buckets, see + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + */ + +public class PutDirectoryBucketPolicy { + private static final Logger logger = LoggerFactory.getLogger(PutDirectoryBucketPolicy.class); + + // snippet-start:[s3directorybuckets.java2.directory_bucket_put_bucket_policy.main] + /** + * Sets the bucket policy for the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param policyText The policy text to be applied + */ + public static void putDirectoryBucketPolicy(S3Client s3Client, String bucketName, String policyText) { + logger.info("Setting policy on bucket: {}", bucketName); + logger.info("Policy: {}", policyText); + + try { + PutBucketPolicyRequest policyReq = PutBucketPolicyRequest.builder() + .bucket(bucketName) + .policy(policyText) + .build(); + + s3Client.putBucketPolicy(policyReq); + logger.info("Bucket policy set successfully!"); + + } catch (S3Exception e) { + logger.error("Failed to set bucket policy: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + throw e; + } + } + // snippet-end:[s3directorybuckets.java2.directory_bucket_put_bucket_policy.main] + + // Main method for testing + public static void main(String[] args) { + Region region = Region.US_WEST_2; + S3Client s3Client = createS3Client(region); + String zone = "usw2-az1"; + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3"; + + // Get AWS account ID + String awsAccountId = getAwsAccountId(); + + // Policy text + + String policyText = "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"AdminPolicy\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\n" + + " \"AWS\": \"arn:aws:iam::" + awsAccountId + ":root\"\n" + + " },\n" + + " \"Action\": \"s3express:*\",\n" + + " \"Resource\": \"arn:aws:s3express:us-west-2:" + awsAccountId + ":bucket/" + bucketName + + "\"\n" + + " }\n" + + " ]\n" + + "}"; + + try { + // Create the directory bucket + createDirectoryBucket(s3Client, bucketName, zone); + // Put Bucket Policy + putDirectoryBucketPolicy(s3Client, bucketName, policyText); + } catch (S3Exception e) { + logger.error("An error occurred during S3 operations: {} - Error code: {}", + e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + } finally { + try { + // Tear down by deleting the bucket + deleteDirectoryBucket(s3Client, bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), + e.awsErrorDetails().errorCode()); + } catch (Exception e) { + logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage()); + } finally { + s3Client.close(); + } + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartCopyForDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartCopyForDirectoryBucket.java new file mode 100644 index 00000000000..956dd6c7b5b --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartCopyForDirectoryBucket.java @@ -0,0 +1,185 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_upload_part_copy.import] +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +// snippet-end:[s3directorybuckets.java2.directory_bucket_upload_part_copy.import] + +/** + * Before running this example: + *
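+ * (Orientation, not a prerequisite: the core call in this example is the
+ * multipartUploadCopyForDirectoryBucket method defined below. A minimal sketch,
+ * where both bucket names, the object keys, and the upload ID are hypothetical:
+ *
+ *   List<CompletedPart> parts = multipartUploadCopyForDirectoryBucket(s3Client,
+ *           sourceBucket, "source-large-object", targetBucket, "dest-large-object", uploadId);
+ *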

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class UploadPartCopyForDirectoryBucket {
+    private static final Logger logger = LoggerFactory.getLogger(UploadPartCopyForDirectoryBucket.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_upload_part_copy.main]
+    /**
+     * Creates copy parts based on source object size and copies over individual
+     * parts.
+     *
+     * @param s3Client          The S3 client used to interact with S3
+     * @param sourceBucket      The name of the source bucket
+     * @param sourceKey         The key (name) of the source object
+     * @param destinationBucket The name of the destination bucket
+     * @param destinationKey    The key (name) of the destination object
+     * @param uploadId          The upload ID used to track the multipart upload
+     * @return A list of completed parts
+     */
+    public static List<CompletedPart> multipartUploadCopyForDirectoryBucket(S3Client s3Client, String sourceBucket,
+            String sourceKey, String destinationBucket, String destinationKey, String uploadId) {
+        // Get the object size to track the end of the copy operation
+        HeadObjectRequest headObjectRequest = HeadObjectRequest.builder()
+                .bucket(sourceBucket)
+                .key(sourceKey)
+                .build();
+        HeadObjectResponse headObjectResponse = s3Client.headObject(headObjectRequest);
+        long objectSize = headObjectResponse.contentLength();
+
+        logger.info("Source Object size: {}", objectSize);
+
+        // Copy the object using 20 MB parts
+        long partSize = 20 * 1024 * 1024; // 20 MB
+        long bytePosition = 0;
+        int partNum = 1;
+        List<CompletedPart> uploadedParts = new ArrayList<>();
+
+        while (bytePosition < objectSize) {
+            long lastByte = Math.min(bytePosition + partSize - 1, objectSize - 1);
+            logger.info("Part Number: {}, Byte Position: {}, Last Byte: {}", partNum, bytePosition, lastByte);
+
+            try {
+                UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder()
+                        .sourceBucket(sourceBucket)
+                        .sourceKey(sourceKey)
+                        .destinationBucket(destinationBucket)
+                        .destinationKey(destinationKey)
+                        .uploadId(uploadId)
+                        .copySourceRange("bytes=" + bytePosition + "-" + lastByte)
+                        .partNumber(partNum)
+                        .build();
+                UploadPartCopyResponse uploadPartCopyResponse = s3Client.uploadPartCopy(uploadPartCopyRequest);
+
+                CompletedPart part = CompletedPart.builder()
+                        .partNumber(partNum)
+                        .eTag(uploadPartCopyResponse.copyPartResult().eTag())
+                        .build();
+                uploadedParts.add(part);
+
+                bytePosition += partSize;
+                partNum++;
+            } catch (S3Exception e) {
+                logger.error("Failed to copy part number {}: {} - Error code: {}", partNum,
+                        e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+                throw e;
+            }
+        }
+
+        return uploadedParts;
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_upload_part_copy.main]
+
+    // Main method for testing
+    public static void main(String[] args) {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String sourceDirectoryBucket = "test-source-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String targetDirectoryBucket = "test-destination-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String sourceObjectKey = "source-large-object"; // your-source-object-key
+        String destinationObjectKey = "dest-large-object"; // your-destination-object-key
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg"); // path to your file
+        String uploadIdSource;
+        String uploadIdDest;
+
+        try {
+            // Create the source and target directory buckets
+            createDirectoryBucket(s3Client, sourceDirectoryBucket, zone);
+            createDirectoryBucket(s3Client, targetDirectoryBucket, zone);
+            // Create a multipart upload to upload the large object to the source directory
+            // bucket
+            uploadIdSource = createDirectoryBucketMultipartUpload(s3Client, sourceDirectoryBucket, sourceObjectKey);
+            // Perform multipart upload for the directory bucket
+            List<CompletedPart> uploadedPartsSource = multipartUploadForDirectoryBucket(s3Client, sourceDirectoryBucket,
+                    sourceObjectKey, uploadIdSource, filePath);
+            // Complete Multipart Uploads
+            completeDirectoryBucketMultipartUpload(s3Client, sourceDirectoryBucket, sourceObjectKey, uploadIdSource,
+                    uploadedPartsSource);
+
+            // Create a multipart upload to upload the large object to the destination
+            // directory bucket
+            uploadIdDest = createDirectoryBucketMultipartUpload(s3Client, targetDirectoryBucket, destinationObjectKey);
+            // Perform multipart upload copy for the directory bucket
+            List<CompletedPart> uploadedPartsDestination = multipartUploadCopyForDirectoryBucket(s3Client,
+                    sourceDirectoryBucket, sourceObjectKey, targetDirectoryBucket, destinationObjectKey, uploadIdDest);
+            // Complete the multipart upload
+            completeDirectoryBucketMultipartUpload(s3Client, targetDirectoryBucket, destinationObjectKey, uploadIdDest,
+                    uploadedPartsDestination);
+
+            logger.info("Multipart upload copy completed for source object: {} to the object copy: {}", sourceObjectKey,
+                    destinationObjectKey);
+        } catch (S3Exception e) {
+            logger.error("Failed to complete multipart copy: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+        } catch (IOException e) {
+            logger.error("An I/O error occurred: {}", e.getMessage());
+        } finally {
+            // Combined try-catch for cleanup operations
+            try {
+                logger.info("Aborting Multipart Uploads in bucket: {}", sourceDirectoryBucket);
+                abortDirectoryBucketMultipartUploads(s3Client, sourceDirectoryBucket);
+                logger.info("Aborting Multipart Uploads in bucket: {}", targetDirectoryBucket);
+                abortDirectoryBucketMultipartUploads(s3Client, targetDirectoryBucket);
+
+                logger.info("Deleting the objects in bucket: {}", sourceDirectoryBucket);
+                deleteAllObjectsInDirectoryBucket(s3Client, sourceDirectoryBucket);
+                logger.info("Deleting the objects in bucket: {}", targetDirectoryBucket);
+                deleteAllObjectsInDirectoryBucket(s3Client, targetDirectoryBucket);
+
+                deleteDirectoryBucket(s3Client, sourceDirectoryBucket);
+                deleteDirectoryBucket(s3Client, targetDirectoryBucket);
+            } catch (S3Exception e) {
+                logger.error("Failed to clean up S3 resources: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to clean up resources due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartForDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartForDirectoryBucket.java
new file mode 100644
index 00000000000..2f2a41e5232
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/UploadPartForDirectoryBucket.java
@@ -0,0 +1,172 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.directorybucket; + +// snippet-start:[s3directorybuckets.java2.directory_bucket_upload_part.import] +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; + +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import static com.example.s3.util.S3DirectoryBucketUtils.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +// snippet-end:[s3directorybuckets.java2.directory_bucket_upload_part.import] + +/** + * Before running this example: + *
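+ * (Orientation, not a prerequisite: the core call in this example is the
+ * multipartUploadForDirectoryBucket method defined below; it throws IOException.
+ * A minimal sketch, where the bucket name, upload ID, and file path are
+ * hypothetical:
+ *
+ *   List<CompletedPart> parts = multipartUploadForDirectoryBucket(s3Client,
+ *           bucketName, "largeObject", uploadId, Paths.get("sample-large-object.jpg"));
+ *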

+ * The SDK must be able to authenticate AWS requests on your behalf. If you have + * not configured + * authentication for SDKs and tools, see + * https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs + * and Tools Reference Guide. + *

+ * You must have a runtime environment configured with the Java SDK. + * See + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in + * the Developer Guide if this is not set up. + *

+ * To use S3 directory buckets, configure a gateway VPC endpoint. This is the + * recommended method to enable directory bucket traffic without + * requiring an internet gateway or NAT device. For more information on + * configuring VPC gateway endpoints, visit + * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway. + *

+ * Directory buckets are available in specific AWS Regions and Zones. For
+ * details on Regions and Zones supporting directory buckets, see
+ * https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
+ */
+
+public class UploadPartForDirectoryBucket {
+    private static final Logger logger = LoggerFactory.getLogger(UploadPartForDirectoryBucket.class);
+
+    // snippet-start:[s3directorybuckets.java2.directory_bucket_upload_part.main]
+    /**
+     * This method creates part requests and uploads individual parts to S3.
+     * While it uses the UploadPart API to upload a single part, it does so
+     * sequentially to handle multiple parts of a file, returning all the completed
+     * parts.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the directory bucket
+     * @param objectKey  The key (name) of the object to be uploaded
+     * @param uploadId   The upload ID used to track the multipart upload
+     * @param filePath   The path to the file to be uploaded
+     * @return A list of uploaded parts
+     * @throws IOException if an I/O error occurs
+     */
+    public static List<CompletedPart> multipartUploadForDirectoryBucket(S3Client s3Client, String bucketName,
+            String objectKey, String uploadId, Path filePath) throws IOException {
+        logger.info("Uploading parts for object: {} in bucket: {}", objectKey, bucketName);
+
+        int partNumber = 1;
+        List<CompletedPart> uploadedParts = new ArrayList<>();
+        ByteBuffer bb = ByteBuffer.allocate(1024 * 1024 * 5); // 5 MB byte buffer
+
+        // Read the local file, break down into chunks and process
+        try (RandomAccessFile file = new RandomAccessFile(filePath.toFile(), "r")) {
+            long fileSize = file.length();
+            int position = 0;
+
+            // Sequentially upload parts of the file
+            while (position < fileSize) {
+                file.seek(position);
+                int read = file.getChannel().read(bb);
+
+                bb.flip(); // Swap position and limit before reading from the buffer
+                UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
+                        .bucket(bucketName)
+                        .key(objectKey)
+                        .uploadId(uploadId)
+                        .partNumber(partNumber)
+                        .build();
+
+                UploadPartResponse partResponse = s3Client.uploadPart(
+                        uploadPartRequest,
+                        RequestBody.fromByteBuffer(bb));
+
+                // Build the uploaded part
+                CompletedPart uploadedPart = CompletedPart.builder()
+                        .partNumber(partNumber)
+                        .eTag(partResponse.eTag())
+                        .build();
+
+                // Add the uploaded part to the list
+                uploadedParts.add(uploadedPart);
+
+                // Log to indicate the part upload is done
+                logger.info("Uploaded part number: {} with ETag: {}", partNumber, partResponse.eTag());
+
+                bb.clear();
+                position += read;
+                partNumber++;
+            }
+        } catch (S3Exception e) {
+            logger.error("Failed to upload parts: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                    e.awsErrorDetails().errorCode());
+            throw e;
+        }
+        return uploadedParts;
+    }
+    // snippet-end:[s3directorybuckets.java2.directory_bucket_upload_part.main]
+
+    // Main method for testing
+    public static void main(String[] args) throws IOException {
+        Region region = Region.US_WEST_2;
+        S3Client s3Client = createS3Client(region);
+        String zone = "usw2-az1";
+        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
+        String objectKey = "largeObject"; // your-object-key
+        Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg"); // path to your file
+        String uploadId;
+
+        try {
+            // Create the directory bucket
+            createDirectoryBucket(s3Client, bucketName, zone);
+            // Create a multipart upload
+            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);
+            // Perform multipart upload for the directory bucket
+            List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey,
+                    uploadId, filePath);
+            logger.info("Uploaded parts: {}", uploadedParts);
+        } catch (S3Exception e) {
+            logger.error("An error occurred during S3 operations: {} - Error code: {}",
+                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+        } catch (IOException e) {
+            logger.error("An I/O error occurred: {}", e.getMessage());
+        } finally {
+            // Combined try-catch for cleanup operations
+            try {
+                logger.info("Aborting Multipart Uploads in bucket: {}", bucketName);
+                abortDirectoryBucketMultipartUploads(s3Client, bucketName);
+
+                logger.info("Deleting the objects in bucket: {}", bucketName);
+                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
+
+                logger.info("Attempting to delete bucket: {}", bucketName);
+                deleteDirectoryBucket(s3Client, bucketName);
+            } catch (S3Exception e) {
+                logger.error("Failed to clean up S3 resources: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
+                        e.awsErrorDetails().errorCode());
+            } catch (Exception e) {
+                logger.error("Failed to clean up resources due to unexpected error: {}", e.getMessage());
+            } finally {
+                s3Client.close();
+            }
+        }
+    }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/util/S3DirectoryBucketUtils.java b/javav2/example_code/s3/src/main/java/com/example/s3/util/S3DirectoryBucketUtils.java
new file mode 100644
index 00000000000..f326a4d1917
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/util/S3DirectoryBucketUtils.java
@@ -0,0 +1,597 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package com.example.s3.util;
+
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kms.model.*;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
+import software.amazon.awssdk.services.s3.presigner.S3Presigner;
+import software.amazon.awssdk.services.sts.StsClient;
+import software.amazon.awssdk.services.sts.model.GetCallerIdentityRequest;
+import software.amazon.awssdk.services.sts.model.GetCallerIdentityResponse;
+import software.amazon.awssdk.services.kms.KmsClient;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.io.UncheckedIOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class S3DirectoryBucketUtils {
+    private static final Logger logger = LoggerFactory.getLogger(S3DirectoryBucketUtils.class);
+
+    /**
+     * Creates an S3 client with the specified region.
+     *
+     * @param region The AWS region.
+     * @return The S3 client.
+     */
+    public static S3Client createS3Client(Region region) {
+        return S3Client.builder().region(region).build();
+    }
+
+    /**
+     * Creates a new S3 directory bucket.
+     *
+     * @param s3Client   The S3 client used to create the bucket
+     * @param bucketName The name of the bucket to be created
+     * @param zone       The ID of the Availability Zone where the bucket will be created
+     */
+    public static void createDirectoryBucket(S3Client s3Client, String bucketName, String zone) {
+        logger.info("Creating bucket: {}", bucketName);
+
+        CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder()
+                .location(LocationInfo.builder()
+                        .type(LocationType.AVAILABILITY_ZONE)
+                        .name(zone).build())
+                .bucket(BucketInfo.builder()
+                        .type(BucketType.DIRECTORY)
+                        .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE)
+                        .build())
+                .build();
+
+        try {
+            CreateBucketRequest bucketRequest = CreateBucketRequest.builder()
+                    .bucket(bucketName)
+                    .createBucketConfiguration(bucketConfiguration).build();
+            CreateBucketResponse response = s3Client.createBucket(bucketRequest);
+            logger.info("Bucket created successfully with location: {}", response.location());
+        } catch (S3Exception e) {
+            logger.error("Error creating bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+            throw e;
+        }
+    }
+
+    /**
+     * Checks if the specified bucket exists.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the bucket to check
+     * @return true if the bucket exists, false otherwise
+     */
+    public static boolean checkBucketExists(S3Client s3Client, String bucketName) {
+        logger.info("Checking if bucket exists: {}", bucketName);
+        try {
+            HeadBucketRequest headBucketRequest = HeadBucketRequest.builder().bucket(bucketName).build();
+            s3Client.headBucket(headBucketRequest);
+            logger.info("Amazon S3 directory bucket: \"{}\" found.", bucketName);
+            return true;
+        } catch (S3Exception e) {
+            if (e.statusCode() == 404) {
+                logger.warn("Amazon S3 directory bucket: \"{}\" not found.", bucketName);
+                return false;
+            } else {
+                logger.error("Failed to access bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+                throw e;
+            }
+        }
+    }
+
+    /**
+     * Deletes the specified bucket.
+     *
+     * @param s3Client   The S3 client used to interact with S3
+     * @param bucketName The name of the bucket to delete
+     */
+    public static void deleteDirectoryBucket(S3Client s3Client, String bucketName) {
+        logger.info("Deleting bucket: {}", bucketName);
+
+        try {
+            DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder().bucket(bucketName).build();
+            s3Client.deleteBucket(deleteBucketRequest);
+            logger.info("Bucket deleted successfully.");
+        } catch (S3Exception e) {
+            logger.error("Error deleting bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
+            throw e;
+        }
+    }
+
+    /**
+     * Sets the bucket policy for the specified S3 directory bucket.
+ * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param policyText The policy text to be applied + */ + public static void putDirectoryBucketPolicy(S3Client s3Client, String bucketName, String policyText) { + logger.info("Setting policy on bucket: {}", bucketName); + + try { + PutBucketPolicyRequest policyReq = PutBucketPolicyRequest.builder() + .bucket(bucketName) + .policy(policyText) + .build(); + s3Client.putBucketPolicy(policyReq); + logger.info("Bucket policy set successfully!"); + } catch (S3Exception e) { + logger.error("Failed to set bucket policy: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Gets the bucket policy for the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the bucket + * @return The bucket policy as a string + */ + public static String getDirectoryBucketPolicy(S3Client s3Client, String bucketName) { + logger.info("Retrieving policy for bucket: {}", bucketName); + try { + GetBucketPolicyRequest policyRequest = GetBucketPolicyRequest.builder().bucket(bucketName).build(); + GetBucketPolicyResponse policyResponse = s3Client.getBucketPolicy(policyRequest); + String policyText = policyResponse.policy(); + logger.info("Retrieved policy for bucket: {}", bucketName); + return policyText; + } catch (S3Exception e) { + if (e.statusCode() == 404) { + logger.warn("No policy found for bucket: {}", bucketName); + return null; + } else { + logger.error("Failed to retrieve policy for bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + } + + /** + * Retrieves the encryption type for the specified S3 bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the bucket + * @return The type of server-side encryption applied to the bucket (e.g., AES256, aws:kms) + */ + public static String getBucketEncryptionType(S3Client s3Client, String bucketName) { + try { + // Create a request to get the bucket encryption configuration + GetBucketEncryptionRequest getRequest = GetBucketEncryptionRequest.builder().bucket(bucketName).build(); + + // Retrieve the bucket encryption response + GetBucketEncryptionResponse getResponse = s3Client.getBucketEncryption(getRequest); + + // Get the server-side encryption rule from the response + ServerSideEncryptionRule rule = getResponse.serverSideEncryptionConfiguration().rules().get(0); + + // Return the type of server-side encryption applied to the bucket + return rule.applyServerSideEncryptionByDefault().sseAlgorithmAsString(); + } catch (S3Exception e) { + logger.error("Failed to retrieve encryption for bucket {}: {} - Error code: {}", bucketName, e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Sets the default encryption configuration for an S3 bucket as SSE-KMS. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param kmsKeyId The ID of the customer-managed KMS key + */ + public static void putDirectoryBucketEncryption(S3Client s3Client, String bucketName, String kmsKeyId) { + // Define the default encryption configuration to use SSE-KMS. For directory buckets, AWS managed KMS keys aren't supported. Only customer-managed keys are supported.
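+ // Once this configuration is applied, GetBucketEncryption reports the algorithm as "aws:kms"; a directory bucket with no explicit configuration reports "AES256" (both values are asserted in the integration tests below).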
+ ServerSideEncryptionByDefault encryptionByDefault = ServerSideEncryptionByDefault.builder() + .sseAlgorithm(ServerSideEncryption.AWS_KMS) + .kmsMasterKeyID(kmsKeyId) + .build(); + + // Create a server-side encryption rule to apply the default encryption configuration. For directory buckets, the bucketKeyEnabled field is enforced to be true. + ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder() + .bucketKeyEnabled(true) + .applyServerSideEncryptionByDefault(encryptionByDefault) + .build(); + + // Create the server-side encryption configuration for the bucket + ServerSideEncryptionConfiguration encryptionConfiguration = ServerSideEncryptionConfiguration.builder() + .rules(rule) + .build(); + + // Create the PutBucketEncryption request + PutBucketEncryptionRequest putRequest = PutBucketEncryptionRequest.builder() + .bucket(bucketName) + .serverSideEncryptionConfiguration(encryptionConfiguration) + .build(); + + // Set the bucket encryption + try { + s3Client.putBucketEncryption(putRequest); + logger.info("SSE-KMS bucket encryption configuration set for the directory bucket: {}", bucketName); + } catch (S3Exception e) { + logger.error("Failed to set bucket encryption: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Puts an object into the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be placed in the bucket + * @param filePath The path of the file to be uploaded + */ + public static void putDirectoryBucketObject(S3Client s3Client, String bucketName, String objectKey, Path filePath) { + logger.info("Putting object: {} into bucket: {}", objectKey, bucketName); + + try { + PutObjectRequest putObj = PutObjectRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + s3Client.putObject(putObj, filePath); + logger.info("Successfully placed {} into bucket {}", objectKey, bucketName); + } catch (UncheckedIOException e) { + throw S3Exception.builder().message("Failed to read the file: " + e.getMessage()).cause(e).build(); + } catch (S3Exception e) { + logger.error("Failed to put object: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Checks if the specified S3 bucket exists. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the bucket to check + * @return true if the bucket exists, false otherwise + */ + public static boolean doesBucketExist(S3Client s3Client, String bucketName) { + try { + // Attempt to retrieve the bucket metadata. If this request succeeds, the bucket exists + s3Client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); + return true; // If no exception is thrown, the bucket exists + } catch (NoSuchBucketException e) { + return false; // If NoSuchBucketException is thrown, the bucket does not exist + } catch (S3Exception e) { + logger.error("Failed to check if bucket {} exists: {} - Error code: {}", bucketName, e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Checks if the specified object exists in the given S3 directory bucket.
+ * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key of the object to check + * @return true if the object exists, false otherwise + */ + public static boolean checkObjectExists(S3Client s3Client, String bucketName, String objectKey) { + try { + // Attempt to retrieve the object's metadata. If this request succeeds, the object exists + s3Client.headObject(HeadObjectRequest.builder().bucket(bucketName).key(objectKey).build()); + return true; // If no exception is thrown, the object exists + } catch (NoSuchKeyException e) { + return false; // If NoSuchKeyException is thrown, the object does not exist + } catch (S3Exception e) { + logger.error("Failed to check if object {} exists: {} - Error code: {}", objectKey, e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Deletes an object from the specified S3 bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the S3 bucket + * @param objectKey The key (name) of the object to be deleted + */ + public static void deleteObject(S3Client s3Client, String bucketName, String objectKey) { + try { + DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + s3Client.deleteObject(deleteObjectRequest); + logger.info("Successfully deleted object: {} from bucket: {}", objectKey, bucketName); + } catch (S3Exception e) { + logger.error("Error deleting object: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Deletes all objects in the specified S3 directory bucket. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket to be emptied + */ + public static void deleteAllObjectsInDirectoryBucket(S3Client s3Client, String bucketName) { + try { + ListObjectsV2Request listObjectsRequest = ListObjectsV2Request.builder() + .bucket(bucketName) + .build(); + + ListObjectsV2Response listObjectsResponse; + do { + listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + List<S3Object> objects = listObjectsResponse.contents(); + + for (S3Object object : objects) { + DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder() + .bucket(bucketName) + .key(object.key()) + .build(); + s3Client.deleteObject(deleteObjectRequest); + } + + listObjectsRequest = listObjectsRequest.toBuilder() + .continuationToken(listObjectsResponse.nextContinuationToken()) + .build(); + } while (listObjectsResponse.isTruncated()); + + logger.info("Successfully deleted all objects in bucket: {}", bucketName); + } catch (S3Exception e) { + logger.error("Failed to delete objects in bucket: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Gets the AWS account ID of the current caller. + * + * This method uses the AWS Security Token Service (STS) to get the + * account ID of the current AWS account. It builds an STS client, + * sends a GetCallerIdentity request, and retrieves the account ID + * from the response.
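+ * + * In these examples the account ID is used to build the bucket policy principal ({@code arn:aws:iam::account-id:root}) and the {@code arn:aws:s3express} resource ARN.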
+ * + * @return The AWS account ID + */ + public static String getAwsAccountId() { + // Use try-with-resources so the STS client is closed after the call + try (StsClient stsClient = StsClient.builder().region(Region.US_WEST_2).build()) { + GetCallerIdentityRequest request = GetCallerIdentityRequest.builder().build(); + GetCallerIdentityResponse response = stsClient.getCallerIdentity(request); + return response.account(); + } + } + + /** + * Creates a new KMS customer-managed key. + * + * @param kmsClient The KMS client used to create the key + * @return The ID of the created KMS key + */ + public static String createKmsKey(KmsClient kmsClient) { + CreateKeyRequest request = CreateKeyRequest.builder() + .description("Customer managed key for S3 bucket encryption") + .keyUsage("ENCRYPT_DECRYPT") + .build(); + + CreateKeyResponse response = kmsClient.createKey(request); + return response.keyMetadata().keyId(); + } + + /** + * Creates a KMS client with the specified region. + * + * @param region The AWS region + * @return The KMS client + */ + public static KmsClient createKmsClient(Region region) { + return KmsClient.builder().region(region).build(); + } + + /** + * Schedules the deletion of the specified customer managed key (CMK). + * + * @param kmsClient The KMS client used to interact with KMS + * @param keyId The ID of the CMK to be deleted + * @param waitingPeriodInDays The waiting period (in days) before the key is + * permanently deleted + * @return The scheduled deletion date + */ + public static String scheduleKeyDeletion(KmsClient kmsClient, String keyId, int waitingPeriodInDays) { + logger.info("Scheduling deletion for key: {}", keyId); + + try { + ScheduleKeyDeletionRequest request = ScheduleKeyDeletionRequest.builder() + .keyId(keyId) + .pendingWindowInDays(waitingPeriodInDays) + .build(); + + ScheduleKeyDeletionResponse response = kmsClient.scheduleKeyDeletion(request); + logger.info("Successfully scheduled key deletion. Deletion date: {}", response.deletionDate()); + return response.deletionDate().toString(); + } catch (KmsException e) { + logger.error("Failed to schedule key deletion: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * Creates and returns an S3Presigner object + * using the specified AWS region. + * + * @param region The AWS region to be used for the S3Presigner + * @return A newly instantiated S3Presigner object + */ + public static S3Presigner createS3Presigner(Region region) { + return S3Presigner.builder() + .region(region) + .build(); + } + + /** + * Aborts multipart uploads for the specified S3 directory bucket.
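+ * <p>Aborting an in-progress multipart upload removes any parts that were already uploaded. The tests call this before bucket deletion because a directory bucket with in-progress multipart uploads cannot be deleted.</p>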
+ * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + */ + public static void abortDirectoryBucketMultipartUploads(S3Client s3Client, String bucketName) { + logger.info("Aborting multipart uploads for bucket: {}", bucketName); + + try { + ListMultipartUploadsRequest listMultipartUploadsRequest = ListMultipartUploadsRequest.builder() + .bucket(bucketName) + .build(); + + ListMultipartUploadsResponse listMultipartUploadsResponse = s3Client + .listMultipartUploads(listMultipartUploadsRequest); + List<MultipartUpload> uploads = listMultipartUploadsResponse.uploads(); + + for (MultipartUpload upload : uploads) { + AbortMultipartUploadRequest abortMultipartUploadRequest = AbortMultipartUploadRequest.builder() + .bucket(bucketName) + .key(upload.key()) + .uploadId(upload.uploadId()) + .build(); + s3Client.abortMultipartUpload(abortMultipartUploadRequest); + logger.info("Aborted multipart upload: {} for object: {}", upload.uploadId(), upload.key()); + } + } catch (S3Exception e) { + logger.error("Failed to abort all multipart uploads: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * This method creates a multipart upload request that generates a unique upload + * ID used to track all the upload parts. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @return The upload ID used to track the multipart upload + */ + public static String createDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey) { + logger.info("Creating multipart upload for object: {} in bucket: {}", objectKey, bucketName); + + try { + CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectKey) + .build(); + + CreateMultipartUploadResponse response = s3Client.createMultipartUpload(createMultipartUploadRequest); + String uploadId = response.uploadId(); + logger.info("Multipart upload initiated. Upload ID: {}", uploadId); + return uploadId; + } catch (S3Exception e) { + logger.error("Failed to create multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + /** + * This method creates part requests and uploads individual parts to S3. + * It calls the UploadPart API sequentially, once per part of the file, and + * returns all the uploaded parts.
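+ * <p>The 5 MB buffer allocated below matches the S3 multipart minimum: every part except the last must be at least 5 MiB.</p>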
+ * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @param uploadId The upload ID used to track the multipart upload + * @param filePath The path to the file to be uploaded + * @return A list of completed parts + * @throws IOException if an I/O error occurs + */ + public static List<CompletedPart> multipartUploadForDirectoryBucket(S3Client s3Client, String bucketName, + String objectKey, String uploadId, Path filePath) throws IOException { + logger.info("Uploading parts for object: {} in bucket: {}", objectKey, bucketName); + + int partNumber = 1; + List<CompletedPart> uploadedParts = new ArrayList<>(); + ByteBuffer bb = ByteBuffer.allocate(1024 * 1024 * 5); // 5 MB byte buffer + + try (RandomAccessFile file = new RandomAccessFile(filePath.toFile(), "r")) { + long fileSize = file.length(); + long position = 0; // long, not int, so files larger than 2 GB are handled correctly + + while (position < fileSize) { + file.seek(position); + int read = file.getChannel().read(bb); + + bb.flip(); // Swap position and limit before reading from the buffer + UploadPartRequest uploadPartRequest = UploadPartRequest.builder() + .bucket(bucketName) + .key(objectKey) + .uploadId(uploadId) + .partNumber(partNumber) + .build(); + + UploadPartResponse partResponse = s3Client.uploadPart(uploadPartRequest, RequestBody.fromByteBuffer(bb)); + + CompletedPart uploadedPart = CompletedPart.builder() + .partNumber(partNumber) + .eTag(partResponse.eTag()) + .build(); + + uploadedParts.add(uploadedPart); + + logger.info("Uploaded part number: {} with ETag: {}", partNumber, partResponse.eTag()); + + bb.clear(); + position += read; + partNumber++; + } + } catch (IOException e) { + logger.error("Error reading file: {}", e.getMessage()); + throw e; + } catch (S3Exception e) { + logger.error("Failed to upload part: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + return uploadedParts; + } + + /** + * This method completes the multipart upload request by collating all the + * upload parts. + * + * @param s3Client The S3 client used to interact with S3 + * @param bucketName The name of the directory bucket + * @param objectKey The key (name) of the object to be uploaded + * @param uploadId The upload ID used to track the multipart upload + * @param uploadParts The list of completed parts + */ + public static void completeDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey, + String uploadId, List<CompletedPart> uploadParts) { + CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder() + .parts(uploadParts) + .build(); + + CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectKey) + .uploadId(uploadId) + .multipartUpload(completedMultipartUpload) + .build(); + + try { + CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload(completeMultipartUploadRequest); + logger.info("Multipart upload completed.
Object Key: {} ETag: {}", response.key(), response.eTag()); + } catch (S3Exception e) { + logger.error("Failed to complete multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode()); + throw e; + } + } + + +} diff --git a/javav2/example_code/s3/src/main/resources/directoryBucket/sample-large-object.jpg b/javav2/example_code/s3/src/main/resources/directoryBucket/sample-large-object.jpg new file mode 100644 index 00000000000..35602323438 Binary files /dev/null and b/javav2/example_code/s3/src/main/resources/directoryBucket/sample-large-object.jpg differ diff --git a/javav2/example_code/s3/src/main/resources/directoryBucket/sample1.txt b/javav2/example_code/s3/src/main/resources/directoryBucket/sample1.txt new file mode 100644 index 00000000000..feab84d4a49 --- /dev/null +++ b/javav2/example_code/s3/src/main/resources/directoryBucket/sample1.txt @@ -0,0 +1,4 @@ +Utilitatis causa amicitia est quaesita. +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Collatio igitur ista te nihil iuvat. Honesta oratio, Socratica, Platonis etiam. Primum in nostrane potestate est, quid meminerimus? Duo Reges: constructio interrete. Quid, si etiam iucunda memoria est praeteritorum malorum? Si quidem, inquit, tollerem, sed relinquo. An nisi populari fama? + +Quamquam id quidem licebit iis existimare, qui legerint. Summum a vobis bonum voluptas dicitur. At hoc in eo M. Refert tamen, quo modo. Quid sequatur, quid repugnet, vident. Iam id ipsum absurdum, maximum malum neglegi. \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/resources/directoryBucket/sample2.txt b/javav2/example_code/s3/src/main/resources/directoryBucket/sample2.txt new file mode 100644 index 00000000000..b6772e2ff8c --- /dev/null +++ b/javav2/example_code/s3/src/main/resources/directoryBucket/sample2.txt @@ -0,0 +1,13 @@ +Aeque enim contingit omnibus fidibus, ut incontentae sint. +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quae cum ita sint, effectum est nihil esse malum, quod turpe non sit. Itaque nostrum est-quod nostrum dico, artis est-ad ea principia, quae accepimus. Quod totum contra est. Duo Reges: constructio interrete. Atqui iste locus est, Piso, tibi etiam atque etiam confirmandus, inquam; Quamvis enim depravatae non sint, pravae tamen esse possunt. Duarum enim vitarum nobis erunt instituta capienda. + +Non igitur de improbo, sed de callido improbo quaerimus, qualis Q. Audio equidem philosophi vocem, Epicure, sed quid tibi dicendum sit oblitus es. Ex ea difficultate illae fallaciloquae, ut ait Accius, malitiae natae sunt. At multis malis affectus. Nam quibus rebus efficiuntur voluptates, eae non sunt in potestate sapientis. Quis est tam dissimile homini. Ut proverbia non nulla veriora sint quam vestra dogmata. Si quicquam extra virtutem habeatur in bonis. Sed plane dicit quod intellegit. Paulum, cum regem Persem captum adduceret, eodem flumine invectio? + +Qui ita affectus, beatum esse numquam probabis; Sed nimis multa. Nam prius a se poterit quisque discedere quam appetitum earum rerum, quae sibi conducant, amittere. Familiares nostros, credo, Sironem dicis et Philodemum, cum optimos viros, tum homines doctissimos. Quod iam a me expectare noli. Quid ergo? + +Eademne, quae restincta siti? Ita relinquet duas, de quibus etiam atque etiam consideret. Illa videamus, quae a te de amicitia dicta sunt. Eaedem res maneant alio modo. Quid ergo attinet gloriose loqui, nisi constanter loquare? 
Prioris generis est docilitas, memoria; Portenta haec esse dicit, neque ea ratione ullo modo posse vivi; Beatum, inquit. Bestiarum vero nullum iudicium puto. + + Quem Tiberina descensio festo illo die tanto gaudio affecit, quanto L. Quorum sine causa fieri nihil putandum est. Tria genera bonorum; Nunc dicam de voluptate, nihil scilicet novi, ea tamen, quae te ipsum probaturum esse confidam. Illud dico, ea, quae dicat, praeclare inter se cohaerere. Fortemne possumus dicere eundem illum Torquatum? Hoc tu nunc in illo probas. Cur post Tarentum ad Archytam? + + Indicant pueri, in quibus ut in speculis natura cernitur. + Sed tamen est aliquid, quod nobis non liceat, liceat illis. Virtutis, magnitudinis animi, patientiae, fortitudinis fomentis dolor mitigari solet. Piso igitur hoc modo, vir optimus tuique, ut scis, amantissimus. Non prorsus, inquit, omnisque, qui sine dolore sint, in voluptate, et ea quidem summa, esse dico. Potius inflammat, ut coercendi magis quam dedocendi esse videantur. Virtutis, magnitudinis animi, patientiae, fortitudinis fomentis dolor mitigari solet. Quae fere omnia appellantur uno ingenii nomine, easque virtutes qui habent, ingeniosi vocantur. Nec enim, dum metuit, iustus est, et certe, si metuere destiterit, non erit; \ No newline at end of file diff --git a/javav2/example_code/s3/src/test/java/S3DirectoryBucketIntegrationTest.java b/javav2/example_code/s3/src/test/java/S3DirectoryBucketIntegrationTest.java new file mode 100644 index 00000000000..22f816e7174 --- /dev/null +++ b/javav2/example_code/s3/src/test/java/S3DirectoryBucketIntegrationTest.java @@ -0,0 +1,908 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import com.example.s3.directorybucket.*; +import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Tag; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import static com.example.s3.directorybucket.GetDirectoryBucketEncryption.getDirectoryBucketEncryption; +import static com.example.s3.util.S3DirectoryBucketUtils.*; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class S3DirectoryBucketIntegrationTest { + // Logger for the class + private static final Logger logger = LoggerFactory.getLogger(S3DirectoryBucketIntegrationTest.class); + + // AWS S3 client + private static S3Client s3Client; + + // AWS account ID + private static String awsAccountId = getAwsAccountId(); + + // Region and Zone constants + private static final Region region = Region.US_WEST_2; + private static final String ZONE = "usw2-az1"; + + // List to keep track of created buckets + private static final List<String> createBuckets = new ArrayList<>(); + + // Constants for bucket base names + private static final String BUCKET_BASE_NAME = "test-bucket"; + private static final String SOURCE_BUCKET_BASE_NAME = "test-source-bucket";
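+ // List to keep track of KMS keys created by the encryption tests (added so that teardown can schedule each key for deletion) + private static final List<String> createdKmsKeys = new ArrayList<>();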
+ private static final String POLICY_BUCKET_BASE_NAME = "test-bucket-policy-bucket-name"; + + // Generated bucket names for tests + private static String testBucketName; + private static String testSourceBucketName; + private static String policyBucketName; + + // Policy text + private static String policyText2; + + // Object keys + private static String objectKey1 = "example-object-1"; + //private static String objectKey2 = "example-object-2"; + private static String mpuObject1 = "large-object-1"; + private static String mpuObject2 = "large-object-2"; + + // File paths + private static String localFilePath1 = "src/main/resources/directoryBucket/sample1.txt"; + private static String localFilePath2 = "src/main/resources/directoryBucket/sample2.txt"; + private static String localLargeFilePath = "src/main/resources/directoryBucket/sample-large-object.jpg"; + private static Path filePath1 = Paths.get(localFilePath1); + private static Path filePath2 = Paths.get(localFilePath2); + private static Path filePathLarge = Paths.get(localLargeFilePath); + + // Static block to initialize time-dependent names + static { + long timestamp = System.currentTimeMillis(); + testBucketName = generateBucketName(BUCKET_BASE_NAME, timestamp); + testSourceBucketName = generateBucketName(SOURCE_BUCKET_BASE_NAME, timestamp); + policyBucketName = generateBucketName(POLICY_BUCKET_BASE_NAME, timestamp); + policyText2 = "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"AdminPolicy\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\n" + + " \"AWS\": \"arn:aws:iam::" + awsAccountId + ":root\"\n" + + " },\n" + + " \"Action\": \"s3express:*\",\n" + + " \"Resource\": \"arn:aws:s3express:us-west-2:" + awsAccountId + ":bucket/" + policyBucketName + "\"\n" + + " }\n" + + " ]\n" + + "}"; + + } + + // Helper method to generate bucket names + private static String generateBucketName(String baseName, long timestamp) { + return baseName + "-" + timestamp + "--" + ZONE + "--x-s3"; + } + + @BeforeAll + static void setup() throws Exception { + // Initialize the S3 client + s3Client = S3Client.builder().region(region).build(); + + + // Log a message to verify logger configuration + logger.info("Logger initialized and set to INFO level"); + + // Create and set up buckets for various tests + // Create a bucket for general testing + createDirectoryBucket(s3Client, testBucketName, ZONE); + createBuckets.add(testBucketName); + + // Create a source bucket for testing + createDirectoryBucket(s3Client, testSourceBucketName, ZONE); + createBuckets.add(testSourceBucketName); + + // Put objects to the source bucket for object level operation testing + putDirectoryBucketObject(s3Client, testSourceBucketName, objectKey1, filePath1); + //putDirectoryBucketObject(s3Client, testSourceBucketName, objectKey2, filePath2); + + + + // Create a bucket for policy testing + createDirectoryBucket(s3Client, policyBucketName, ZONE); + createBuckets.add(policyBucketName); + + } + + @Test + @Tag("IntegrationTest") + void testCreateBucket() { + String newBucketName = "creation-" + testBucketName; + try { + CreateDirectoryBucket.createDirectoryBucket(s3Client, newBucketName, ZONE); + createBuckets.add(newBucketName); + } catch (RuntimeException rte) { + logger.error("Failed to create bucket '{}': {}", newBucketName, rte.getMessage()); + throw rte; // Re-throw the exception to fail the test if bucket creation fails + } + + // Check if the bucket exists + Assertions.assertTrue(checkBucketExists(s3Client, 
testBucketName), "Bucket should exist after creation"); + logger.info("Test passed: Bucket '{}' exists", testBucketName); + } + + @Test + @Tag("IntegrationTest") + void testPutBucketPolicy() { + // Initialize policy text with the generated bucket names + String policyText = "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"AdminPolicy\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\n" + + " \"AWS\": \"arn:aws:iam::" + awsAccountId + ":root\"\n" + + " },\n" + + " \"Action\": \"s3express:*\",\n" + + " \"Resource\": \"arn:aws:s3express:us-west-2:" + awsAccountId + ":bucket/" + testBucketName + "\"\n" + + " }\n" + + " ]\n" + + "}"; + + String appliedPolicy = null; + JsonNode expectedPolicyJson; + JsonNode appliedPolicyJson; + try { + // Apply the bucket policy + putDirectoryBucketPolicy(s3Client, testBucketName, policyText); + + // Verify the policy was applied + appliedPolicy = getDirectoryBucketPolicy(s3Client, testBucketName); + + // Parse policies using JSON + ObjectMapper objectMapper = new ObjectMapper(); + expectedPolicyJson = objectMapper.readTree(policyText); + appliedPolicyJson = objectMapper.readTree(appliedPolicy); + } catch (RuntimeException | IOException e) { + logger.error("An error occurred: {}", e.getMessage()); + throw new RuntimeException("Test failed due to an error", e); + } + + // Compare the policies using JSON parsing + Assertions.assertEquals(expectedPolicyJson, appliedPolicyJson, "Bucket policy should match the expected policy text"); + logger.info("Test passed: Bucket policy for '{}' is as expected", testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testGetBucketPolicy() { + String retrievedPolicy = null; + JsonNode expectedPolicyJson; + JsonNode retrievedPolicyJson; + try { + // Add a bucket policy to the bucket + putDirectoryBucketPolicy(s3Client, policyBucketName, policyText2); + + // Verify the policy was applied + retrievedPolicy = GetDirectoryBucketPolicy.getDirectoryBucketPolicy(s3Client, policyBucketName); + logger.info("Retrieved policy: {}", retrievedPolicy); + + // Compare the policies using JSON parsing + ObjectMapper objectMapper = new ObjectMapper(); + expectedPolicyJson = objectMapper.readTree(policyText2); + retrievedPolicyJson = objectMapper.readTree(retrievedPolicy); + } catch (RuntimeException | IOException e) { + logger.error("An error occurred: {}", e.getMessage()); + throw new RuntimeException("Test failed due to an error", e); + } + + Assertions.assertEquals(expectedPolicyJson, retrievedPolicyJson, "Bucket policy should match the expected policy text"); + logger.info("Test passed: Retrieved bucket policy for '{}' is as expected", policyBucketName); + } + + @Test + @Tag("IntegrationTest") + void testDeleteBucketPolicy() { + String retrievedPolicy = null; + try { + // Apply the bucket policy + putDirectoryBucketPolicy(s3Client, policyBucketName, policyText2); + + // Delete the bucket policy + DeleteDirectoryBucketPolicy.deleteDirectoryBucketPolicy(s3Client, policyBucketName); + + // Verify the policy was deleted + retrievedPolicy = getDirectoryBucketPolicy(s3Client, policyBucketName); + + } catch (RuntimeException rte) { + if (retrievedPolicy != null) { + logger.error("Bucket policy was not deleted for '{}': {}", policyBucketName, rte.getMessage()); + throw rte; + } else { + logger.warn("No policy found for bucket '{}', which is expected after deletion.", policyBucketName); + } + } + + Assertions.assertNull(retrievedPolicy, "Bucket policy should be null after deletion"); 
+ logger.info("Test passed: Bucket policy for '{}' was deleted as expected", policyBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testPutBucketEncryption() { + KmsClient kmsClient = createKmsClient(region); + final String[] kmsKeyId = new String[1]; + String encryptionType = null; + long timestamp = System.currentTimeMillis(); + final String PUT_ENCRYPTION_BUCKET_BASE_NAME = "test-put-encrypt-bucket-name"; + String testPutEncryptionBucketName = generateBucketName(PUT_ENCRYPTION_BUCKET_BASE_NAME, timestamp); + // Create a bucket for SSE-KMS encryption testing + createDirectoryBucket(s3Client, testPutEncryptionBucketName, ZONE); + createBuckets.add(testPutEncryptionBucketName); + + try { + // Create a new KMS key and set bucket encryption using the KMS key + kmsKeyId[0] = createKmsKey(kmsClient); + PutDirectoryBucketEncryption.putDirectoryBucketEncryption(s3Client, testPutEncryptionBucketName, kmsKeyId[0]); + + // Verify the encryption type of the bucket + encryptionType = getBucketEncryptionType(s3Client, testPutEncryptionBucketName); + } catch (RuntimeException e) { + logger.error("An error occurred during bucket encryption or KMS key handling: {}", e.getMessage()); + throw e; + } + + Assertions.assertEquals("aws:kms", encryptionType, "Bucket should be encrypted with aws:kms"); + logger.info("Test passed: Put bucket encryption successfully for '{}'", testPutEncryptionBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testGetBucketEncryption() { + String encryptionType = null; + + final String GET_ENCRYPTION_BUCKET_BASE_NAME = "test-get-encrypt-bucket-name"; + String testGetEncryptionBucketName; + long timestamp = System.currentTimeMillis(); + testGetEncryptionBucketName = generateBucketName(GET_ENCRYPTION_BUCKET_BASE_NAME, timestamp); + // Create a bucket for get encryption testing + createDirectoryBucket(s3Client, testGetEncryptionBucketName, ZONE); + createBuckets.add(testGetEncryptionBucketName); + + try { + // Retrieve the bucket encryption type + encryptionType = getDirectoryBucketEncryption(s3Client, testGetEncryptionBucketName); + } catch (RuntimeException e) { + logger.error("Failed to get bucket encryption for '{}': {}", testGetEncryptionBucketName, e.getMessage()); + throw e; + } + // Verify the encryption type. By default, S3 directory buckets are encrypted with AES256 when no other encryption types are specified. 
+ Assertions.assertEquals("AES256", encryptionType, "Bucket encryption should be AES256"); + logger.info("Test passed: Got bucket encryption successfully for '{}'", testGetEncryptionBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testDeleteBucketEncryption() { + String encryptionTypeAfterDeletion = null; + long timestamp = System.currentTimeMillis(); + final String DELETE_ENCRYPTION_BUCKET_BASE_NAME = "test-delete-encrypt-bucket-name"; + String testDeleteEncryptionBucketName; + testDeleteEncryptionBucketName = generateBucketName(DELETE_ENCRYPTION_BUCKET_BASE_NAME, timestamp); + // Create a bucket for delete encryption testing + createDirectoryBucket(s3Client, testDeleteEncryptionBucketName, ZONE); + createBuckets.add(testDeleteEncryptionBucketName); + + try { + // Set the bucket encryption to SSE-KMS + String kmsKeyId = createKmsKey(createKmsClient(region)); + putDirectoryBucketEncryption(s3Client, testDeleteEncryptionBucketName, kmsKeyId); + + // Delete the bucket encryption + DeleteDirectoryBucketEncryption.deleteDirectoryBucketEncryption(s3Client, testDeleteEncryptionBucketName); + + // Verify the encryption type after deletion + encryptionTypeAfterDeletion = getDirectoryBucketEncryption(s3Client, testDeleteEncryptionBucketName); + } catch (RuntimeException e) { + logger.error("Failed to delete bucket encryption for '{}': {}", testDeleteEncryptionBucketName, e.getMessage()); + throw e; + } + + // By default, S3 buckets should be encrypted with AES256 if no other encryption is specified + Assertions.assertEquals("AES256", encryptionTypeAfterDeletion, "Bucket encryption should be AES256 after deletion"); + logger.info("Test passed: Deleted bucket encryption successfully for '{}'", testDeleteEncryptionBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testListDirectoryBuckets() { + List bucketNames = null; + + try { + // List directory buckets + bucketNames = ListDirectoryBuckets.listDirectoryBuckets(s3Client); + logger.info("List directory buckets '{}'", bucketNames); + } catch (RuntimeException e) { + logger.error("Failed to list directory buckets: {}", e.getMessage()); + throw e; + } + + // Verify that the bucket list is not empty. There are already some buckets created in the setup, so the list should not be empty. + Assertions.assertFalse(bucketNames.isEmpty(), "The list of directory buckets should not be empty"); + logger.info("Test passed: Listed directory buckets successfully"); + } + + + @Test + @Tag("IntegrationTest") + void testHeadBucket() { + boolean bucketExists = false; + + try { + // Perform the head bucket operation + bucketExists = HeadDirectoryBucket.headDirectoryBucket(s3Client, testBucketName); + } catch (RuntimeException e) { + logger.error("Failed to perform head bucket check for '{}': {}", testBucketName, e.getMessage()); + throw e; + } + + // Verify the bucket exists + Assertions.assertTrue(bucketExists, "The bucket should exist"); + logger.info("Test passed: Head bucket check successfully for '{}'", testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testDeleteBucket() { + boolean bucketDeleted = false; + final String EXCEPTION_BUCKET_BASE_NAME = "exception-bucket-name"; + String exceptionBucketName; + long timestamp = System.currentTimeMillis(); + exceptionBucketName = generateBucketName(EXCEPTION_BUCKET_BASE_NAME, timestamp); + createDirectoryBucket(s3Client, exceptionBucketName, ZONE); + createBuckets.add(exceptionBucketName); + + try { + // The testing bucket is empty. Delete the bucket. 
+ DeleteDirectoryBucket.deleteDirectoryBucket(s3Client, exceptionBucketName); + + // Verify the bucket has been deleted + bucketDeleted = !doesBucketExist(s3Client, exceptionBucketName); + } catch (RuntimeException e) { + logger.error("Failed to delete bucket for '{}': {}", exceptionBucketName, e.getMessage()); + throw e; + } + + // Verify that the bucket no longer exists + Assertions.assertTrue(bucketDeleted, "The bucket should be deleted"); + logger.info("Test passed: Deleted bucket successfully for '{}'", exceptionBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testPutObject() { + boolean objectExists = false; + + try { + // Put the object into the bucket + PutDirectoryBucketObject.putDirectoryBucketObject(s3Client, testBucketName, objectKey1, filePath1); + + // Check if the object exists + objectExists = checkObjectExists(s3Client, testBucketName, objectKey1); + } catch (RuntimeException e) { + logger.error("Failed to put object into '{}': {}", testBucketName, e.getMessage()); + throw e; + } + + // Verify the object exists in the bucket + Assertions.assertTrue(objectExists, "The object should exist in the bucket after being put"); + logger.info("Test passed: Put object successfully into '{}'", testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testCopyObject() { + boolean objectExists = false; + long timestamp = System.currentTimeMillis(); + final String DESTINATION_BUCKET_BASE_NAME = "test-destination-bucket"; + String testDestinationBucketName; + testDestinationBucketName = generateBucketName(DESTINATION_BUCKET_BASE_NAME, timestamp); + // Create a destination bucket for copy testing + createDirectoryBucket(s3Client, testDestinationBucketName, ZONE); + createBuckets.add(testDestinationBucketName); + + try { + // Copy the object to the destination bucket + CopyDirectoryBucketObject.copyDirectoryBucketObject(s3Client, testSourceBucketName, objectKey1, testDestinationBucketName); + + // Check if the object exists in the destination bucket + objectExists = checkObjectExists(s3Client, testDestinationBucketName, objectKey1); + } catch (RuntimeException e) { + logger.error("Failed to copy object from '{}' to '{}': {}", testSourceBucketName, testDestinationBucketName, e.getMessage()); + throw e; + } + + // Verify the object exists in the destination bucket + Assertions.assertTrue(objectExists, "The object should exist in the destination bucket after being copied"); + logger.info("Test passed: Copied object successfully from '{}' to '{}'", testSourceBucketName, testDestinationBucketName); + } + + @Test + @Tag("IntegrationTest") + void testListObjectsV2() { + List<String> objectKeys = null; + + try { + // In setup, one object was put into the bucket testSourceBucketName. + // List objects in the bucket + objectKeys = ListDirectoryBucketObjectsV2.listDirectoryBucketObjectsV2(s3Client, testSourceBucketName); + } catch (RuntimeException e) { + logger.error("Failed to list objects in '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify that the object list is not empty. There should be at least one object in the bucket.
+ Assertions.assertFalse(objectKeys.isEmpty(), "The list of objects in the bucket should not be empty"); + logger.info("Test passed: Listed objects in bucket '{}'", testSourceBucketName); + } + + @Test + @Tag("IntegrationTest") + void testGetObject() { + boolean objectRetrieved = false; + + try { + // Retrieve the object from the bucket + objectRetrieved = GetDirectoryBucketObject.getDirectoryBucketObject(s3Client, testSourceBucketName, objectKey1); + } catch (RuntimeException e) { + logger.error("Failed to retrieve object from '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the object was retrieved successfully + Assertions.assertTrue(objectRetrieved, "The object should be successfully retrieved from the bucket"); + logger.info("Test passed: Retrieved object successfully from '{}'", testSourceBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testHeadObject() { + boolean objectExists = false; + + try { + // Perform the head object operation + objectExists = HeadDirectoryBucketObject.headDirectoryBucketObject(s3Client, testSourceBucketName, objectKey1); + } catch (RuntimeException e) { + logger.error("Failed to perform head object check for '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the object exists + Assertions.assertTrue(objectExists, "The object should exist in the bucket"); + logger.info("Test passed: Head object check successfully for '{}'", testSourceBucketName); + } + + @Test + @Tag("IntegrationTest") + void testGetObjectAttributes() { + boolean attributesRetrieved = false; + + try { + // Retrieve the object attributes from the bucket + attributesRetrieved = GetDirectoryBucketObjectAttributes.getDirectoryBucketObjectAttributes(s3Client, testSourceBucketName, objectKey1); + } catch (RuntimeException e) { + logger.error("Failed to retrieve object attributes from '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the object attributes were retrieved successfully + Assertions.assertTrue(attributesRetrieved, "The object attributes should be successfully retrieved from the bucket"); + logger.info("Test passed: Retrieved object attributes successfully from '{}'", testSourceBucketName); + } + + @Test + @Tag("IntegrationTest") + void testGeneratePresignedGetURL() { + // AWS S3 presigner + S3Presigner s3Presigner = S3Presigner.builder().region(region).build(); + boolean urlGenerated = false; + boolean urlAccessible = false; + + try { + // Generate the presigned GET URL + String presignedURL = GeneratePresignedGetURLForDirectoryBucket.generatePresignedGetURLForDirectoryBucket(s3Presigner, testSourceBucketName, objectKey1); + + // Check if the presigned URL is not null + if (presignedURL != null) { + urlGenerated = true; + // Make an HTTP GET request to the presigned URL + HttpURLConnection connection = (HttpURLConnection) new URL(presignedURL).openConnection(); + connection.setRequestMethod("GET"); + + // Verify the HTTP response status code + int responseCode = connection.getResponseCode(); + urlAccessible = (responseCode == 200); + + connection.disconnect(); + } + } catch (RuntimeException | IOException e) { + logger.error("Failed to generate or access presigned GET URL for '{}': {}", testSourceBucketName, e.getMessage()); + throw new RuntimeException("Test failed due to an error", e); + } + + // Verify the presigned GET URL was generated and is accessible + Assertions.assertTrue(urlGenerated, "The presigned GET URL should be generated successfully"); + 
Assertions.assertTrue(urlAccessible, "The presigned GET URL should be accessible"); + logger.info("Test passed: Generated and accessed presigned GET URL successfully for '{}'", testSourceBucketName); + } + + @Test + @Tag("IntegrationTest") + void testDeleteObject() { + String objectKeyDelete1 = "example-object-1-delete"; + boolean objectExistsBeforeDeletion = false; + boolean objectDeleted = false; + + try { + // Put an object into the bucket for testing deletion + putDirectoryBucketObject(s3Client, testSourceBucketName, objectKeyDelete1, filePath1); + + // Verify the object exists before deletion + objectExistsBeforeDeletion = checkObjectExists(s3Client, testSourceBucketName, objectKeyDelete1); + + // Delete the object + DeleteDirectoryBucketObject.deleteDirectoryBucketObject(s3Client, testSourceBucketName, objectKeyDelete1); + + // Verify the object is deleted + objectDeleted = !checkObjectExists(s3Client, testSourceBucketName, objectKeyDelete1); + } catch (RuntimeException e) { + logger.error("Failed to delete object from '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the object existed before deletion and was successfully deleted + Assertions.assertTrue(objectExistsBeforeDeletion, "The object should exist in the bucket before deletion"); + Assertions.assertTrue(objectDeleted, "The object should be successfully deleted from the bucket"); + logger.info("Test passed: Deleted object successfully from '{}'", testSourceBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testDeleteObjects() { + final String DELETE_OBJECTS_BUCKET_BASE_NAME = "test-delete-objects-bucket-name"; + String testDeleteObjectsBucketName; + // Create buckets and put objects for exception and DeleteObjects testing + long timestamp = System.currentTimeMillis(); + testDeleteObjectsBucketName = generateBucketName(DELETE_OBJECTS_BUCKET_BASE_NAME, timestamp); + createDirectoryBucket(s3Client, testDeleteObjectsBucketName, ZONE); + createBuckets.add(testDeleteObjectsBucketName); + String objectKeyDelete1 = "example-object-1-delete"; + String objectKeyDelete2 = "example-object-2-delete"; + boolean objectsExistBeforeDeletion = false; + boolean objectsDeleted = false; + + try { + putDirectoryBucketObject(s3Client, testDeleteObjectsBucketName, objectKeyDelete1, filePath1); + putDirectoryBucketObject(s3Client, testDeleteObjectsBucketName, objectKeyDelete2, filePath2); + + // Verify the objects exist before deletion + objectsExistBeforeDeletion = checkObjectExists(s3Client, testDeleteObjectsBucketName, objectKeyDelete1) && + checkObjectExists(s3Client, testDeleteObjectsBucketName, objectKeyDelete2); + + // Delete the objects + DeleteDirectoryBucketObjects.deleteDirectoryBucketObjects(s3Client, testDeleteObjectsBucketName, + List.of(objectKeyDelete1, objectKeyDelete2)); + + // Verify the objects are deleted + objectsDeleted = !checkObjectExists(s3Client, testDeleteObjectsBucketName, objectKeyDelete1) && + !checkObjectExists(s3Client, testDeleteObjectsBucketName, objectKeyDelete2); + } catch (RuntimeException e) { + logger.error("Failed to delete objects from '{}': {}", testDeleteObjectsBucketName, e.getMessage()); + throw e; + } + + // Verify the objects existed before deletion and were successfully deleted + Assertions.assertTrue(objectsExistBeforeDeletion, "The objects should exist in the bucket before deletion"); + Assertions.assertTrue(objectsDeleted, "The objects should be successfully deleted from the bucket"); + logger.info("Test passed: Deleted multiple objects successfully from 
'{}'", testDeleteObjectsBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testCreateMultipartUpload() { + String uploadId = null; + boolean multipartUploadCreated = false; + + try { + // Create a multipart upload + uploadId = CreateDirectoryBucketMultipartUpload.createDirectoryBucketMultipartUpload(s3Client, testSourceBucketName, mpuObject1); + + // Check if the uploadId is not null + if (uploadId != null) { + multipartUploadCreated = true; + } + } catch (RuntimeException e) { + logger.error("Failed to create multipart upload for '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the multipart upload was created successfully + Assertions.assertTrue(multipartUploadCreated, "The multipart upload should be created successfully"); + logger.info("Test passed: Created multipart upload successfully for '{}'", testSourceBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testUploadPart() throws IOException { + final String MPU_BUCKET_BASE_NAME = "test-mpu-bucket-name"; + String testMPUBucketName; + long timestamp = System.currentTimeMillis(); + testMPUBucketName = generateBucketName(MPU_BUCKET_BASE_NAME, timestamp); + List completedParts = null; + boolean partUploaded = false; + + // Create the bucket and multipart upload + createDirectoryBucket(s3Client, testMPUBucketName, ZONE); + createBuckets.add(testMPUBucketName); + String uploadId1 = createDirectoryBucketMultipartUpload(s3Client, testMPUBucketName, mpuObject1); + + try { + // Upload a part for the multipart upload + completedParts = UploadPartForDirectoryBucket.multipartUploadForDirectoryBucket(s3Client, testMPUBucketName, mpuObject1, uploadId1, filePathLarge); + + // Check if the part was uploaded successfully by verifying if completedParts is not null or empty + if (completedParts != null && !completedParts.isEmpty()) { + partUploaded = true; + } + } catch (RuntimeException | IOException e) { + logger.error("Failed to upload part for '{}': {}", testMPUBucketName, e.getMessage()); + throw e; + } + + // Verify the part was uploaded successfully + Assertions.assertTrue(partUploaded, "The part should be uploaded successfully"); + logger.info("Test passed: Uploaded part successfully for '{}'", testMPUBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testUploadPartCopy() throws IOException { + String uploadIdSourceMPU; + String uploadIdDestinationMPU; + List uploadedPartsListSource = null; + List copiedPartsListDestination = null; + boolean copySuccessful = false; + + // Create a multipart upload of source bucket for testing + uploadIdSourceMPU = createDirectoryBucketMultipartUpload(s3Client, testSourceBucketName, mpuObject1); + + // Perform multipart upload in the source directory bucket for testing + uploadedPartsListSource = multipartUploadForDirectoryBucket(s3Client, testSourceBucketName, mpuObject1, uploadIdSourceMPU, filePathLarge); + + // Complete Multipart Uploads + completeDirectoryBucketMultipartUpload(s3Client, testSourceBucketName, mpuObject1, uploadIdSourceMPU, uploadedPartsListSource); + + // Create a multipart upload of destination bucket for testing + uploadIdDestinationMPU = createDirectoryBucketMultipartUpload(s3Client, testBucketName, mpuObject2); + + try { + // Perform multipart upload copy in the destination directory bucket + copiedPartsListDestination = UploadPartCopyForDirectoryBucket.multipartUploadCopyForDirectoryBucket(s3Client, testSourceBucketName, mpuObject1, testBucketName, mpuObject2, uploadIdDestinationMPU); + + // Check if the copy was successful by 
verifying if copiedPartsListDestination is not null or empty + if (copiedPartsListDestination != null && !copiedPartsListDestination.isEmpty()) { + copySuccessful = true; + } + } catch (RuntimeException e) { + logger.error("Failed to copy part for the object '{}': {}", mpuObject2, e.getMessage()); + throw e; + } + + // Verify the part was copied successfully + Assertions.assertTrue(copySuccessful, "The part should be copied successfully"); + logger.info("Test passed: Copied part successfully for the object copy '{}' to the destination bucket '{}'", mpuObject2, testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testListParts() throws IOException { + // Create a multipart upload for ListParts and ListMultipartUpload testing + String uploadId2; + uploadId2 = createDirectoryBucketMultipartUpload(s3Client, testBucketName, mpuObject1); + List<CompletedPart> uploadedPartsList; + + // Perform multipart upload in the directory bucket for testing + uploadedPartsList = multipartUploadForDirectoryBucket(s3Client, testBucketName, mpuObject1, uploadId2, filePathLarge); + + List<Part> parts = null; + boolean partsListed = false; + + try { + // List the parts of the multipart upload + parts = ListDirectoryBucketParts.listDirectoryBucketMultipartUploadParts(s3Client, testBucketName, mpuObject1, uploadId2); + + // Check if the parts list is not null or empty + if (parts != null && !parts.isEmpty()) { + partsListed = true; + } + } catch (RuntimeException e) { + logger.error("Failed to list parts for '{}': {}", testBucketName, e.getMessage()); + throw e; + } + + // Verify the parts were listed successfully + Assertions.assertTrue(partsListed, "The parts should be listed successfully"); + parts.forEach(part -> logger.info("Part Number: {}, ETag: {}, Size: {}", part.partNumber(), part.eTag(), part.size())); + logger.info("Test passed: Listed parts successfully for '{}'", testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testListMultipartUpload() throws IOException { + // Create a multipart upload for testing + String uploadId = createDirectoryBucketMultipartUpload(s3Client, testBucketName, mpuObject1); + List<CompletedPart> uploadedPartsList; + + // Perform multipart upload in the directory bucket for testing + uploadedPartsList = multipartUploadForDirectoryBucket(s3Client, testBucketName, mpuObject1, uploadId, filePathLarge); + + boolean uploadsListed = false; + List<MultipartUpload> multipartUploads = null; + + try { + // List the multipart uploads in the bucket + multipartUploads = ListDirectoryBucketMultipartUpload.listDirectoryBucketMultipartUploads(s3Client, testBucketName); + + // Check if the multipart uploads list is not null or empty + if (multipartUploads != null && !multipartUploads.isEmpty()) { + uploadsListed = true; + } + } catch (RuntimeException e) { + logger.error("Failed to list multipart uploads for '{}': {}", testBucketName, e.getMessage()); + throw e; + } + + // Verify the multipart uploads were listed successfully + Assertions.assertTrue(uploadsListed, "The multipart uploads should be listed successfully"); + multipartUploads.forEach(upload -> logger.info("Upload ID: {}, Key: {}", upload.uploadId(), upload.key())); + logger.info("Test passed: Listed multipart uploads successfully for '{}'", testBucketName); + } + + + + @Test + @Tag("IntegrationTest") + void testCompleteMultipartUpload() throws IOException { + String uploadId3; + boolean multipartUploadCompleted = false; + + // Create a multipart upload for testing + uploadId3 = createDirectoryBucketMultipartUpload(s3Client, testBucketName, mpuObject2); + //
Perform multipart upload in the directory bucket for testing + List<CompletedPart> uploadedPartsList = multipartUploadForDirectoryBucket(s3Client, testBucketName, mpuObject2, uploadId3, filePathLarge); + + try { + // Complete the multipart upload + multipartUploadCompleted = CompleteDirectoryBucketMultipartUpload.completeDirectoryBucketMultipartUpload(s3Client, testBucketName, mpuObject2, uploadId3, uploadedPartsList); + } catch (RuntimeException e) { + logger.error("Failed to complete multipart upload for '{}': {}", testBucketName, e.getMessage()); + throw e; + } + + // Verify the multipart upload was completed successfully + Assertions.assertTrue(multipartUploadCompleted, "The multipart upload should be completed successfully"); + logger.info("Test passed: Completed multipart upload successfully for '{}'", testBucketName); + } + + + @Test + @Tag("IntegrationTest") + void testAbortMultipartUpload() { + String uploadId4; + boolean abortSuccessful = false; + + // Create a multipart upload for testing + uploadId4 = createDirectoryBucketMultipartUpload(s3Client, testSourceBucketName, mpuObject1); + + try { + // Abort the multipart upload + abortSuccessful = AbortDirectoryBucketMultipartUploads.abortDirectoryBucketMultipartUpload(s3Client, testSourceBucketName, mpuObject1, uploadId4); + } catch (RuntimeException e) { + logger.error("Failed to abort multipart upload for '{}': {}", testSourceBucketName, e.getMessage()); + throw e; + } + + // Verify the multipart upload was aborted successfully + Assertions.assertTrue(abortSuccessful, "The multipart upload should be aborted successfully"); + logger.info("Test passed: Aborted multipart upload successfully for '{}'", testSourceBucketName); + } + + + @AfterAll + static void teardown() throws Exception { + KmsClient kmsClient = createKmsClient(region); + + // Empty and delete the S3 buckets created for testing + for (String bucketName : createBuckets) { + try { + // Delete all objects in the bucket + deleteAllObjectsInDirectoryBucket(s3Client, bucketName); + + // Abort multipart uploads + abortDirectoryBucketMultipartUploads(s3Client, bucketName); + + // Ensure the bucket exists before attempting to delete it + if (doesBucketExist(s3Client, bucketName)) { + // Delete the bucket + DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder() + .bucket(bucketName) + .build(); + s3Client.deleteBucket(deleteBucketRequest); + logger.info("Bucket deleted: {}", bucketName); + } else { + logger.warn("Bucket does not exist and cannot be deleted: {}", bucketName); + } + } catch (NoSuchBucketException e) { + logger.error("Failed to delete objects in bucket: {} - Error code: NoSuchBucket", bucketName, e); + } catch (S3Exception e) { + if (e.awsErrorDetails().errorCode().contains("KMS.KMSInvalidStateException")) { + logger.warn("Bucket: {} could not be deleted due to KMS key pending deletion.
Error: {}", bucketName, e.awsErrorDetails().errorMessage()); + } else { + logger.error("Failed to delete objects in bucket: " + bucketName + " - Error code: " + e.awsErrorDetails().errorCode(), e); + } + } catch (Exception e) { + logger.error("Failed to delete bucket: " + bucketName, e); + } + } + + // Schedule the deletion of the created KMS key + if (kmsKeyId != null) { + try { + String deletionDate = scheduleKeyDeletion(kmsClient, kmsKeyId, 7); // 7 days waiting period + logger.info("Key scheduled for deletion on: {}", deletionDate); + } catch (RuntimeException e) { + logger.error("Failed to schedule key deletion: {}", e.getMessage()); + } + } + + // Ensure the KMS client is closed + kmsClient.close(); + + // Close the S3 client + s3Client.close(); + } + + +} \ No newline at end of file