
Commit

added retrying and increased client timeout thresholds
smashyalts committed Mar 1, 2024
1 parent 8a349f0 commit 9a4995c
Showing 3 changed files with 121 additions and 75 deletions.
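In short, this commit raises the S3 client's socket and connection timeouts to 60 seconds and wraps each part upload in a retry loop that sleeps progressively longer between attempts. A minimal standalone sketch of that pattern, with illustrative class and method names that are not the plugin's own:

import com.amazonaws.ClientConfiguration;

import java.util.concurrent.Callable;

public class RetryingUploadSketch {

    // Longer S3 client timeouts, mirroring the values used in the diff below.
    static ClientConfiguration clientConfigurationWithLongTimeouts() {
        ClientConfiguration cfg = new ClientConfiguration();
        cfg.setSocketTimeout(60_000);      // 60-second socket timeout
        cfg.setConnectionTimeout(60_000);  // 60-second connection timeout
        return cfg;
    }

    // Runs one attempt (e.g. a part upload), retrying up to maxRetries times
    // with a linearly growing sleep between attempts, as the diff below does.
    static <T> T withRetries(int maxRetries, Callable<T> attempt) throws Exception {
        int retryCount = 0;
        while (true) {
            try {
                return attempt.call();
            } catch (Exception e) {
                if (++retryCount > maxRetries) {
                    throw e; // still failing after maxRetries attempts: give up
                }
                try {
                    Thread.sleep(1000L * retryCount); // back off before retrying
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(ie);
                }
            }
        }
    }
}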
@@ -1,5 +1,5 @@
package com.funniray.chunkydedicated;

import java.util.concurrent.ForkJoinPool;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
@@ -11,15 +11,22 @@
import org.bukkit.World;
import org.bukkit.plugin.java.JavaPlugin;
import org.popcraft.chunky.api.ChunkyAPI;

import java.awt.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Random;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
public final class ChunkyDedicated extends JavaPlugin {
ChunkyAPI chunky;
String bucketName = getConfig().getString("bucket-name");
@@ -81,16 +88,16 @@ public void onEnable() {
});
}

public void multipartUploadWithS3Client(String filePath) {
ClientConfiguration clientConfiguration = new ClientConfiguration();
clientConfiguration.setSocketTimeout(60000); // 60-second socket timeout
clientConfiguration.setConnectionTimeout(60000); // 60-second connection timeout

AmazonS3 s3 = AmazonS3ClientBuilder.standard()
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("https://" + getConfig().getString("cloudflare-account-id") + ".r2.cloudflarestorage.com" + "/" + getConfig().getString("bucket-name"), "auto"))
.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(getConfig().getString("cloudflare-access-key"), getConfig().getString("cloudflare-secret-key"))))
.withClientConfiguration(clientConfiguration) // Pass the client configuration here
.build();

InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, "World." + key + ".zip");
InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);
@@ -99,49 +106,63 @@ public void multipartUploadWithS3Client(String filePath) {
long contentLength = file.length();
long partSize = 10 * 1024 * 1024; // Increased part size to 10MB

try {
ConcurrentLinkedQueue<PartETag> partETags = new ConcurrentLinkedQueue<>(); // Use ConcurrentLinkedQueue instead of synchronized list

try {
long filePosition = 0;
List<PartETag> partETags = new ArrayList<PartETag>();
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (int i = 1; filePosition < contentLength; i++) {

partSize = Math.min(partSize, (contentLength - filePosition));

UploadPartRequest uploadRequest = new UploadPartRequest()
.withBucketName(bucketName)
.withKey("World." + key + ".zip")
.withUploadId(initResponse.getUploadId())
.withPartNumber(i)
.withFileOffset(filePosition)
.withFile(file)
.withPartSize(partSize);

// Retry logic
int retryCount = 0;
while (true) {
try {
UploadPartResult uploadResult = s3.uploadPart(uploadRequest);
partETags.add(uploadResult.getPartETag());
break;
} catch (Exception e) {
if (++retryCount > 10) { // Maximum of 3 retries
throw e; // If still failing after 3 retries, rethrow the exception
}
// Wait before retrying
try {
Thread.sleep(1000 * retryCount); // Wait for an increasing amount of time before retrying
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
final int partNumber = i;
final long partFilePosition = filePosition;
final long partSizeFinal = partSize;

// Submit a CompletableFuture for each part upload
CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
// Retry logic
int retryCount = 0;
while (true) {
try (BufferedInputStream inputStream = new BufferedInputStream(new FileInputStream(file))) { // Use BufferedInputStream to read the file
inputStream.skip(partFilePosition); // Skip to the correct position in the file
getLogger().info("Part Number: " +partNumber);

UploadPartRequest uploadRequest = new UploadPartRequest()
.withBucketName(bucketName)
.withKey("World." + key + ".zip")
.withUploadId(initResponse.getUploadId())
.withPartNumber(partNumber)
.withInputStream(inputStream) // Use the BufferedInputStream here
.withPartSize(partSizeFinal);

UploadPartResult uploadResult = s3.uploadPart(uploadRequest);
partETags.add(uploadResult.getPartETag());
break;
} catch (Exception e) {
if (++retryCount > 10) { // Maximum of 10 retries
throw new RuntimeException(e); // If still failing after 10 retries, rethrow the exception
}
// Wait before retrying
try {
Thread.sleep(1000 * retryCount); // Wait for an increasing amount of time before retrying
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
}
}
}
}
});

futures.add(future);

filePosition += partSize;
}

// Wait for all CompletableFuture to complete
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();

CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(getConfig().getString("bucket-name"), "World." + key + ".zip",
initResponse.getUploadId(), partETags);
initResponse.getUploadId(), new ArrayList<>(partETags));

s3.completeMultipartUpload(compRequest);
getLogger().info("File uploaded successfully");
@@ -159,5 +180,6 @@ private boolean isFinished() {
@Override
public void onDisable() {
// Plugin shutdown logic
getLogger().info("I'm taking a fucking break from this shit");
}
}
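One caveat about the concurrent part uploads above: the PartETags land in the ConcurrentLinkedQueue in whatever order parts happen to finish, while CompleteMultipartUpload expects the part list in ascending part-number order, so sorting before completing is a sensible precaution. A short hedged sketch, reusing the surrounding imports plus java.util.Comparator (sortedTags is an illustrative name):

    // Sort the collected ETags by part number before completing the upload.
    List<PartETag> sortedTags = new ArrayList<>(partETags);
    sortedTags.sort(Comparator.comparingInt(PartETag::getPartNumber));
    // then pass sortedTags instead of new ArrayList<>(partETags) to CompleteMultipartUploadRequest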
@@ -1,49 +1,72 @@
package com.funniray.chunkydedicated;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.*;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class ZipDirectory {
public static void main(String args) throws IOException {
String sourceFile = args;
FileOutputStream fos = new FileOutputStream("world.zip");
ZipOutputStream zipOut = new ZipOutputStream(fos);

File fileToZip = new File(sourceFile);
zipFile(fileToZip, fileToZip.getName(), zipOut);

ForkJoinPool pool = new ForkJoinPool();
pool.invoke(new ZipAction(fileToZip, fileToZip.getName(), zipOut));

zipOut.close();
fos.close();
System.gc();
}

private static void zipFile(File fileToZip, String fileName, ZipOutputStream zipOut) throws IOException {
if (fileToZip.isHidden()) {
return;
static class ZipAction extends RecursiveAction {
private File fileToZip;
private String fileName;
private ZipOutputStream zipOut;

ZipAction(File fileToZip, String fileName, ZipOutputStream zipOut) {
this.fileToZip = fileToZip;
this.fileName = fileName;
this.zipOut = zipOut;
}
if (fileToZip.isDirectory()) {
if (fileName.endsWith("/")) {
zipOut.putNextEntry(new ZipEntry(fileName));
zipOut.closeEntry();
} else {
zipOut.putNextEntry(new ZipEntry(fileName + "/"));
zipOut.closeEntry();
}
File[] children = fileToZip.listFiles();
for (File childFile : children) {
zipFile(childFile, fileName + "/" + childFile.getName(), zipOut);

@Override
protected void compute() {
if (fileToZip.isHidden()) {
return;
}
} else {
FileInputStream fis = new FileInputStream(fileToZip);
ZipEntry zipEntry = new ZipEntry(fileName);
zipOut.putNextEntry(zipEntry);
byte[] bytes = new byte[1024];
int length;
while ((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
if (fileToZip.isDirectory()) {
if (!fileName.endsWith("/")) {
fileName += "/";
}
try {
zipOut.putNextEntry(new ZipEntry(fileName));
zipOut.closeEntry();
} catch (IOException e) {
e.printStackTrace();
}

File[] children = fileToZip.listFiles();
for (File childFile : children) {
ZipAction action = new ZipAction(childFile, fileName + childFile.getName(), zipOut);
action.fork(); // Start the action asynchronously
}
} else {
try (FileInputStream fis = new FileInputStream(fileToZip)) {
ZipEntry zipEntry = new ZipEntry(fileName);
zipOut.putNextEntry(zipEntry);
byte[] bytes = new byte[1024];
int length;
while ((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
}
} catch (IOException e) {
e.printStackTrace();
}
}
fis.close();
}
}}
}
}
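Two things to watch in the ZipAction above: compute() forks its children but never joins them, so pool.invoke(...) in main can return (and zipOut.close() can run) before every child has written its entry, and ZipOutputStream itself is not thread-safe, so concurrent writes can interleave. A minimal sketch of one way to address both, keeping the same fields and constructor; the SafeZipAction name and the serialized writes are illustrative, not the committed code:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.RecursiveAction;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

// Sketch only: like ZipAction, but children are awaited with invokeAll() and
// every write to the shared ZipOutputStream is serialized by locking on it.
class SafeZipAction extends RecursiveAction {
    private final File fileToZip;
    private final String fileName;
    private final ZipOutputStream zipOut;

    SafeZipAction(File fileToZip, String fileName, ZipOutputStream zipOut) {
        this.fileToZip = fileToZip;
        this.fileName = fileName;
        this.zipOut = zipOut;
    }

    @Override
    protected void compute() {
        if (fileToZip.isHidden()) {
            return;
        }
        if (fileToZip.isDirectory()) {
            String dirName = fileName.endsWith("/") ? fileName : fileName + "/";
            synchronized (zipOut) {
                try {
                    zipOut.putNextEntry(new ZipEntry(dirName));
                    zipOut.closeEntry();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
            File[] children = fileToZip.listFiles();
            if (children == null) {
                return;
            }
            List<SafeZipAction> actions = new ArrayList<>();
            for (File childFile : children) {
                actions.add(new SafeZipAction(childFile, dirName + childFile.getName(), zipOut));
            }
            invokeAll(actions); // fork the children and wait for all of them to finish
        } else {
            synchronized (zipOut) {
                try (FileInputStream fis = new FileInputStream(fileToZip)) {
                    zipOut.putNextEntry(new ZipEntry(fileName));
                    byte[] bytes = new byte[8192];
                    int length;
                    while ((length = fis.read(bytes)) >= 0) {
                        zipOut.write(bytes, 0, length);
                    }
                    zipOut.closeEntry();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
        }
    }
}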
3 changes: 2 additions & 1 deletion chunky-dedicated/src/main/resources/config.yml
@@ -2,4 +2,5 @@ cloudflare-access-key: ""
cloudflare-account-id: ""
cloudflare-secret-key: ""
bucket-name: ""
webhook: ""
upload-threads: "5"
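The new upload-threads key is not read anywhere in this diff; if it is meant to cap the number of concurrent part uploads, one way to wire it up is to hand CompletableFuture.runAsync a fixed-size pool instead of letting it fall back to the common ForkJoinPool. A hedged, self-contained sketch (System.getProperty stands in for the plugin's getConfig().getString("upload-threads") call):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class UploadPoolSketch {
    public static void main(String[] args) {
        // Hypothetical wiring for the "upload-threads" value added to config.yml.
        int uploadThreads = Integer.parseInt(System.getProperty("upload-threads", "5"));
        ExecutorService uploadPool = Executors.newFixedThreadPool(uploadThreads);

        List<CompletableFuture<Void>> futures = new ArrayList<>();
        for (int part = 1; part <= 4; part++) {
            final int partNumber = part;
            futures.add(CompletableFuture.runAsync(() -> {
                // the per-part upload-with-retries from the diff would run here
                System.out.println("uploading part " + partNumber);
            }, uploadPool)); // bounded pool instead of the common ForkJoinPool
        }
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
        uploadPool.shutdown();
    }
}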
