Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Java V2: Create 26 Java V2 code examples for S3 directory bucket actions, S3 #7067

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
434 changes: 434 additions & 0 deletions .doc_gen/metadata/s3-directory-buckets_metadata.yaml

Large diffs are not rendered by default.

26 changes: 0 additions & 26 deletions .doc_gen/metadata/s3_metadata.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3481,32 +3481,6 @@ s3_Scenario_ConditionalRequests:
- python.example_code.s3.S3ConditionalRequests.wrapper
services:
s3: {GetObject, PutObject, CopyObject}
s3_Scenario_ExpressBasics:
title: Learn the basics of Amazon S3 Express One Zone with an &AWS; SDK
title_abbrev: Learn the basics of S3 Express One Zone
synopsis_list:
- Set up a VPC and VPC Endpoint
- Set up the S3 Express Policies, Roles, and User to work with S3 Express buckets
- Create two S3 Clients
- Create two buckets
- Create an object and copy it over
- Demonstrate performance difference
- Populate the buckets to show the lexicographical difference
- Prompt the user to see if they want to clean up the resources
category: Basics
languages:
PHP:
versions:
- sdk_version: 3
github: php/example_code/s3
sdkguide:
excerpts:
- description:
snippet_tags:
- php.example_code.s3.ExpressBasics
- php.example_code.s3.service.S3Service
services:
s3: {CreateVpc, DescribeRouteTables, CreateVpcEndpoint, CreateBucket, CopyObject, GetObject, PutObject, ListObjects, DeleteObject, DeleteBucket, DeleteVpcEndpoint, DeleteVpc}
s3_Scenario_DownloadS3Directory:
title: Download S3 'directories' from an &S3long; (&S3;) bucket
title_abbrev: Download S3 'directories'
Expand Down
1 change: 1 addition & 0 deletions .tools/readmes/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
"sdk_api_ref": 'https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/{{service["name"]}}/package-summary.html',
"service_folder_overrides": {
"s3-control": "javav2/example_code/s3/src/main/java/com/example/s3/batch",
"s3-directory-buckets": "javav2/example_code/s3/src/main/java/com/example/s3/directorybucket",
"medical-imaging": "javav2/example_code/medicalimaging",
},
},
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.example.s3.directorybucket;

// snippet-start:[s3directorybuckets.java2.abortmultipartupload.import]
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

import static com.example.s3.util.S3DirectoryBucketUtils.*;
// snippet-end:[s3directorybuckets.java2.abortmultipartupload.import]

/**
* Before running this example:
* <p/>
* The SDK must be able to authenticate AWS requests on your behalf. If you have
* not configured
* authentication for SDKs and tools, see
* https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs
* and Tools Reference Guide.
* <p/>
* You must have a runtime environment configured with the Java SDK.
* See
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in
* the Developer Guide if this is not set up.
* <p/>
* To use S3 directory buckets, configure a gateway VPC endpoint. This is the
* recommended method to enable directory bucket traffic without
* requiring an internet gateway or NAT device. For more information on
* configuring VPC gateway endpoints, visit
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway.
* <p/>
* Directory buckets are available in specific AWS Regions and Zones. For
* details on Regions and Zones supporting directory buckets, see
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
*/

public class AbortDirectoryBucketMultipartUploads {
    private static final Logger logger = LoggerFactory.getLogger(AbortDirectoryBucketMultipartUploads.class);

    // snippet-start:[s3directorybuckets.java2.abortmultipartupload.main]

    /**
     * Aborts a single in-progress multipart upload in the given S3 directory
     * bucket, releasing storage held by any parts uploaded so far.
     *
     * @param s3Client   the S3 client used to interact with S3
     * @param bucketName the name of the directory bucket
     * @param objectKey  the key (name) of the object whose upload is aborted
     * @param uploadId   the ID identifying the multipart upload to abort
     * @return {@code true} if the abort request succeeded, {@code false} if S3
     *         rejected it
     */
    public static boolean abortDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName,
            String objectKey, String uploadId) {
        logger.info("Aborting multipart upload: {} for bucket: {}", uploadId, bucketName);
        try {
            // The abort request is identified by bucket, key, and upload ID.
            AbortMultipartUploadRequest abortRequest = AbortMultipartUploadRequest.builder()
                    .bucket(bucketName)
                    .key(objectKey)
                    .uploadId(uploadId)
                    .build();
            s3Client.abortMultipartUpload(abortRequest);
            logger.info("Aborted multipart upload: {} for object: {}", uploadId, objectKey);
            return true;
        } catch (S3Exception e) {
            // Report the failure but let the caller decide how to react.
            logger.error("Failed to abort multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                    e.awsErrorDetails().errorCode());
            return false;
        }
    }
    // snippet-end:[s3directorybuckets.java2.abortmultipartupload.main]

    /**
     * Demonstrates the abort flow end to end: create a directory bucket, start
     * a multipart upload, abort it, then tear the bucket down.
     */
    public static void main(String[] args) {
        Region region = Region.US_WEST_2;
        S3Client s3Client = createS3Client(region);
        String zone = "usw2-az1";
        // Directory bucket names must end with "--<zone-id>--x-s3".
        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
        String objectKey = "largeObject"; // your-object-key
        String uploadId;

        try {
            // Provision the bucket and start a multipart upload to abort.
            createDirectoryBucket(s3Client, bucketName, zone);
            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);

            boolean wasAborted = abortDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId);
            if (wasAborted) {
                logger.info("Multipart upload successfully aborted for bucket: {}", bucketName);
            } else {
                logger.error("Failed to abort multipart upload for bucket: {}", bucketName);
            }
        } catch (S3Exception e) {
            logger.error("An error occurred during S3 operations: {} - Error code: {}",
                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
        } finally {
            // Best-effort teardown: delete the bucket, then release the client.
            try {
                deleteDirectoryBucket(s3Client, bucketName);
            } catch (S3Exception e) {
                logger.error("Failed to delete the bucket due to S3 error: {} - Error code: {}",
                        e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
            } catch (Exception e) {
                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage());
            } finally {
                s3Client.close();
            }
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
package com.example.s3.directorybucket;

// snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import]

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;
import software.amazon.awssdk.services.s3.model.S3Exception;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

import com.example.s3.util.S3DirectoryBucketUtils;

import static com.example.s3.util.S3DirectoryBucketUtils.*;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import]

/**
* Before running this example:
* <p/>
* The SDK must be able to authenticate AWS requests on your behalf. If you have
* not configured
* authentication for SDKs and tools, see
* https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs
* and Tools Reference Guide.
* <p/>
* You must have a runtime environment configured with the Java SDK.
* See
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in
* the Developer Guide if this is not set up.
* <p/>
* To use S3 directory buckets, configure a gateway VPC endpoint. This is the
* recommended method to enable directory bucket traffic without
* requiring an internet gateway or NAT device. For more information on
* configuring VPC gateway endpoints, visit
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway.
* <p/>
* Directory buckets are available in specific AWS Regions and Zones. For
* details on Regions and Zones supporting directory buckets, see
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
*/

public class CompleteDirectoryBucketMultipartUpload {
    private static final Logger logger = LoggerFactory.getLogger(CompleteDirectoryBucketMultipartUpload.class);

    // snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main]

    /**
     * This method completes the multipart upload request by collating all the
     * upload parts.
     *
     * @param s3Client    The S3 client used to interact with S3
     * @param bucketName  The name of the directory bucket
     * @param objectKey   The key (name) of the object to be uploaded
     * @param uploadId    The upload ID used to track the multipart upload
     * @param uploadParts The list of completed parts (part number + ETag pairs)
     * @return True if the multipart upload is successfully completed, false
     *         otherwise
     */
    public static boolean completeDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey,
            String uploadId, List<CompletedPart> uploadParts) {
        try {
            // Assemble the part list that tells S3 how to stitch the object together.
            CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder()
                    .parts(uploadParts)
                    .build();
            CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder()
                    .bucket(bucketName)
                    .key(objectKey)
                    .uploadId(uploadId)
                    .multipartUpload(completedMultipartUpload)
                    .build();

            CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload(completeMultipartUploadRequest);
            logger.info("Multipart upload completed. ETag: {}", response.eTag());
            return true;
        } catch (S3Exception e) {
            // Log and signal failure to the caller rather than propagating.
            logger.error("Failed to complete multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                    e.awsErrorDetails().errorCode());
            return false;
        }
    }
    // snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main]

    // Main method for testing: creates a bucket, uploads parts from a local
    // file, completes the upload, then cleans everything up.
    public static void main(String[] args) {
        Region region = Region.US_WEST_2;
        S3Client s3Client = createS3Client(region);
        String zone = "usw2-az1";
        // Directory bucket names must end with "--<zone-id>--x-s3".
        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
        String uploadId;
        String objectKey = "largeObject";
        Path filePath = Paths.get("src/main/resources/directoryBucket/sample-large-object.jpg");

        try {
            // Create the directory bucket
            createDirectoryBucket(s3Client, bucketName, zone);
            // Create a multipart upload
            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);
            // Perform multipart upload for the directory bucket
            List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey,
                    uploadId, filePath);
            logger.info("Uploaded parts: {}", uploadedParts);
            // Complete Multipart Uploads
            boolean completed = completeDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId,
                    uploadedParts);
            if (completed) {
                logger.info("Multipart upload successfully completed for bucket: {}", bucketName);
            } else {
                logger.error("Failed to complete multipart upload for bucket: {}", bucketName);
            }
        } catch (S3Exception e) {
            logger.error("An error occurred during S3 operations: {} - Error code: {}",
                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode());
        } catch (IOException e) {
            // Reading the local file for parts may fail independently of S3.
            logger.error("An I/O error occurred: {}", e.getMessage());
        } finally {
            // Best-effort cleanup: abort stray uploads, empty the bucket,
            // delete it, and always close the client.
            try {
                logger.info("Starting cleanup for bucket: {}", bucketName);
                S3DirectoryBucketUtils.abortDirectoryBucketMultipartUploads(s3Client, bucketName);
                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
                deleteDirectoryBucket(s3Client, bucketName);
                logger.info("Cleanup completed for bucket: {}", bucketName);
            } catch (S3Exception e) {
                logger.error("Error during cleanup: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                        e.awsErrorDetails().errorCode());
            } catch (Exception e) {
                logger.error("Unexpected error during cleanup: {}", e.getMessage());
            } finally {
                // Close the S3 client
                s3Client.close();
            }
        }
    }
}
Loading
Loading