diff --git a/oak-blob-cloud-azure/pom.xml b/oak-blob-cloud-azure/pom.xml
index 191050c21b8..fc88302da65 100644
--- a/oak-blob-cloud-azure/pom.xml
+++ b/oak-blob-cloud-azure/pom.xml
@@ -44,11 +44,14 @@
             com.fasterxml.jackson.annotation;resolution:=optional,
             com.fasterxml.jackson.databind*;resolution:=optional,
-            com.fasterxml.jackson.dataformat.xml;resolution:=optional,
+            com.fasterxml.jackson.dataformat.*;resolution:=optional,
             com.fasterxml.jackson.datatype*;resolution:=optional,
             com.azure.identity.broker.implementation;resolution:=optional,
             com.azure.xml;resolution:=optional,
-            com.microsoft.aad.msal4jextensions*;resolution:=optional,
+            com.azure.storage.common*;resolution:=optional,
+            com.azure.storage.internal*;resolution:=optional,
+            com.microsoft.aad.*;resolution:=optional,
+            com.microsoft.aad.msal4jextensions.persistence*;resolution:=optional,
             com.sun.net.httpserver;resolution:=optional,
             sun.misc;resolution:=optional,
             net.jcip.annotations;resolution:=optional,
@@ -72,6 +75,14 @@
             azure-core,
             azure-identity,
             azure-json,
+            azure-xml,
+            azure-storage-blob,
+            azure-storage-common,
+            azure-storage-internal-avro,
+            com.microsoft.aad,
+            com.microsoft.aad.msal4jextensions,
+            com.microsoft.aad.msal4jextensions.persistence,
+            jackson-dataformat-xml,
             guava,
             jsr305,
             reactive-streams,
@@ -174,6 +185,11 @@
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-storage</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.azure</groupId>
+      <artifactId>azure-storage-blob</artifactId>
+      <version>12.27.1</version>
+    </dependency>
     <dependency>
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-keyvault-core</artifactId>
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java
new file mode 100644
index 00000000000..3e199a5fcbf
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions;
+import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend;
+import org.jetbrains.annotations.NotNull;
+
+import java.net.URI;
+import java.util.Properties;
+
+
+public abstract class AbstractAzureBlobStoreBackend extends AbstractSharedBackend {
+
+    protected abstract DataRecordUpload initiateHttpUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final DataRecordUploadOptions options);
+    protected abstract DataRecord completeHttpUpload(@NotNull String uploadTokenStr) throws DataRecordUploadException, DataStoreException;
+    protected abstract void setHttpDownloadURIExpirySeconds(int seconds);
+    protected abstract void setHttpUploadURIExpirySeconds(int seconds);
+    protected abstract void setHttpDownloadURICacheSize(int maxSize);
+    protected abstract URI createHttpDownloadURI(@NotNull DataIdentifier identifier, @NotNull DataRecordDownloadOptions downloadOptions);
+    public abstract void setProperties(final Properties properties);
+
+}
diff --git
a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java index 01522248b0f..29e1f11dd86 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java @@ -18,24 +18,20 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import com.azure.core.credential.AccessToken; -import com.azure.core.credential.TokenRequestContext; import com.azure.identity.ClientSecretCredential; import com.azure.identity.ClientSecretCredentialBuilder; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.StorageCredentialsToken; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.UserDelegationKey; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders; -import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; -import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobContainerClientBuilder; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.blob.models.UserDelegationKey; +import com.azure.storage.blob.sas.BlobSasPermission; +import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; +import com.azure.storage.blob.specialized.BlockBlobClient; +import com.azure.storage.common.policy.RequestRetryOptions; import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; @@ -44,23 +40,13 @@ import java.io.Closeable; import java.net.URISyntaxException; import java.security.InvalidKeyException; -import java.time.Instant; -import java.time.LocalDateTime; import java.time.OffsetDateTime; -import java.time.format.DateTimeFormatter; -import java.util.Date; -import java.util.EnumSet; -import java.util.Objects; -import java.util.Optional; +import java.time.ZoneOffset; import java.util.Properties; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; public class AzureBlobContainerProvider implements Closeable { private static final Logger log = LoggerFactory.getLogger(AzureBlobContainerProvider.class); private static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; - private static final String AZURE_DEFAULT_SCOPE = "https://storage.azure.com/.default"; private final String azureConnectionString; private final String accountName; private final String containerName; @@ -70,12 +56,6 @@ public class AzureBlobContainerProvider implements Closeable { private final String tenantId; private final String clientId; private final String clientSecret; - private 
ClientSecretCredential clientSecretCredential;
-    private AccessToken accessToken;
-    private StorageCredentialsToken storageCredentialsToken;
-    private static final long TOKEN_REFRESHER_INITIAL_DELAY = 45L;
-    private static final long TOKEN_REFRESHER_DELAY = 1L;
-    private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
 
     private AzureBlobContainerProvider(Builder builder) {
         this.azureConnectionString = builder.azureConnectionString;
@@ -89,6 +69,8 @@ private AzureBlobContainerProvider(Builder builder) {
         this.clientSecret = builder.clientSecret;
     }
 
+    @Override
+    public void close() {}
 
     public static class Builder {
         private final String containerName;
@@ -171,141 +153,64 @@ public String getContainerName() {
         return containerName;
     }
 
+    public String getAzureConnectionString() {
+        return azureConnectionString;
+    }
+
     @NotNull
-    public CloudBlobContainer getBlobContainer() throws DataStoreException {
-        return this.getBlobContainer(null);
+    public BlobContainerClient getBlobContainer() throws DataStoreException {
+        return this.getBlobContainer(null, new Properties());
     }
 
     @NotNull
-    public CloudBlobContainer getBlobContainer(@Nullable BlobRequestOptions blobRequestOptions) throws DataStoreException {
+    public BlobContainerClient getBlobContainer(@Nullable RequestRetryOptions retryOptions, Properties properties) throws DataStoreException {
         // connection string will be given preference over service principals / sas / account key
         if (StringUtils.isNotBlank(azureConnectionString)) {
             log.debug("connecting to azure blob storage via azureConnectionString");
-            return Utils.getBlobContainer(azureConnectionString, containerName, blobRequestOptions);
+            return Utils.getBlobContainerFromConnectionString(getAzureConnectionString(), containerName);
         } else if (authenticateViaServicePrincipal()) {
             log.debug("connecting to azure blob storage via service principal credentials");
-            return getBlobContainerFromServicePrincipals(blobRequestOptions);
+            return getBlobContainerFromServicePrincipals(accountName, retryOptions);
         } else if (StringUtils.isNotBlank(sasToken)) {
             log.debug("connecting to azure blob storage via sas token");
             final String connectionStringWithSasToken = Utils.getConnectionStringForSas(sasToken, blobEndpoint, accountName);
-            return Utils.getBlobContainer(connectionStringWithSasToken, containerName, blobRequestOptions);
+            return Utils.getBlobContainer(connectionStringWithSasToken, containerName, retryOptions, properties);
         }
         log.debug("connecting to azure blob storage via access key");
         final String connectionStringWithAccountKey = Utils.getConnectionString(accountName, accountKey, blobEndpoint);
-        return Utils.getBlobContainer(connectionStringWithAccountKey, containerName, blobRequestOptions);
-    }
-
-    @NotNull
-    private CloudBlobContainer getBlobContainerFromServicePrincipals(@Nullable BlobRequestOptions blobRequestOptions) throws DataStoreException {
-        StorageCredentialsToken storageCredentialsToken = getStorageCredentials();
-        try {
-            CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, accountName);
-            CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient();
-            if (blobRequestOptions != null) {
-                cloudBlobClient.setDefaultRequestOptions(blobRequestOptions);
-            }
-            return cloudBlobClient.getContainerReference(containerName);
-        } catch (URISyntaxException | StorageException e) {
-            throw new DataStoreException(e);
-        }
-    }
-
-    @NotNull
-    private StorageCredentialsToken getStorageCredentials() {
-        boolean
isAccessTokenGenerated = false; - /* generate access token, the same token will be used for subsequent access - * generated token is valid for 1 hour only and will be refreshed in background - * */ - if (accessToken == null) { - clientSecretCredential = new ClientSecretCredentialBuilder() - .clientId(clientId) - .clientSecret(clientSecret) - .tenantId(tenantId) - .build(); - accessToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); - if (accessToken == null || StringUtils.isBlank(accessToken.getToken())) { - log.error("Access token is null or empty"); - throw new IllegalArgumentException("Could not connect to azure storage, access token is null or empty"); - } - storageCredentialsToken = new StorageCredentialsToken(accountName, accessToken.getToken()); - isAccessTokenGenerated = true; - } - - Objects.requireNonNull(storageCredentialsToken, "storage credentials token cannot be null"); - - // start refresh token executor only when the access token is first generated - if (isAccessTokenGenerated) { - log.info("starting refresh token task at: {}", OffsetDateTime.now()); - TokenRefresher tokenRefresher = new TokenRefresher(); - executorService.scheduleWithFixedDelay(tokenRefresher, TOKEN_REFRESHER_INITIAL_DELAY, TOKEN_REFRESHER_DELAY, TimeUnit.MINUTES); - } - return storageCredentialsToken; + return Utils.getBlobContainer(connectionStringWithAccountKey, containerName, retryOptions, properties); } @NotNull - public String generateSharedAccessSignature(BlobRequestOptions requestOptions, + public String generateSharedAccessSignature(RequestRetryOptions retryOptions, String key, - EnumSet permissions, + BlobSasPermission blobSasPermissions, int expirySeconds, - SharedAccessBlobHeaders optionalHeaders) throws DataStoreException, URISyntaxException, StorageException, InvalidKeyException { - SharedAccessBlobPolicy policy = new SharedAccessBlobPolicy(); - Date expiry = Date.from(Instant.now().plusSeconds(expirySeconds)); - policy.setSharedAccessExpiryTime(expiry); - policy.setPermissions(permissions); + Properties properties) throws DataStoreException, URISyntaxException, InvalidKeyException { + + OffsetDateTime expiry = OffsetDateTime.now().plusSeconds(expirySeconds); + BlobServiceSasSignatureValues serviceSasSignatureValues = new BlobServiceSasSignatureValues(expiry, blobSasPermissions); - CloudBlockBlob blob = getBlobContainer(requestOptions).getBlockBlobReference(key); + BlockBlobClient blob = getBlobContainer(retryOptions, properties).getBlobClient(key).getBlockBlobClient(); if (authenticateViaServicePrincipal()) { - return generateUserDelegationKeySignedSas(blob, policy, optionalHeaders, expiry); + return generateUserDelegationKeySignedSas(blob, serviceSasSignatureValues, expiry); } - return generateSas(blob, policy, optionalHeaders); - } - - @NotNull - private String generateUserDelegationKeySignedSas(CloudBlockBlob blob, - SharedAccessBlobPolicy policy, - SharedAccessBlobHeaders optionalHeaders, - Date expiry) throws StorageException { - fillEmptyHeaders(optionalHeaders); - UserDelegationKey userDelegationKey = blob.getServiceClient().getUserDelegationKey(Date.from(Instant.now().minusSeconds(900)), - expiry); - return optionalHeaders == null ? 
blob.generateUserDelegationSharedAccessSignature(userDelegationKey, policy) :
-                blob.generateUserDelegationSharedAccessSignature(userDelegationKey, policy, optionalHeaders, null, null);
-    }
-
-    /* set empty headers as blank string due to a bug in Azure SDK
-     * Azure SDK considers null headers as 'null' string which corrupts the string to sign and generates an invalid
-     * sas token
-     * */
-    private void fillEmptyHeaders(SharedAccessBlobHeaders sharedAccessBlobHeaders) {
-        final String EMPTY_STRING = "";
-        Optional.ofNullable(sharedAccessBlobHeaders)
-                .ifPresent(headers -> {
-                    if (StringUtils.isBlank(headers.getCacheControl())) {
-                        headers.setCacheControl(EMPTY_STRING);
-                    }
-                    if (StringUtils.isBlank(headers.getContentDisposition())) {
-                        headers.setContentDisposition(EMPTY_STRING);
-                    }
-                    if (StringUtils.isBlank(headers.getContentEncoding())) {
-                        headers.setContentEncoding(EMPTY_STRING);
-                    }
-                    if (StringUtils.isBlank(headers.getContentLanguage())) {
-                        headers.setContentLanguage(EMPTY_STRING);
-                    }
-                    if (StringUtils.isBlank(headers.getContentType())) {
-                        headers.setContentType(EMPTY_STRING);
-                    }
-                });
+        return generateSas(blob, serviceSasSignatureValues);
     }
 
     @NotNull
-    private String generateSas(CloudBlockBlob blob,
-                               SharedAccessBlobPolicy policy,
-                               SharedAccessBlobHeaders optionalHeaders) throws InvalidKeyException, StorageException {
-        return optionalHeaders == null ? blob.generateSharedAccessSignature(policy, null) :
-                blob.generateSharedAccessSignature(policy,
-                        optionalHeaders, null, null, null, true);
+    public String generateUserDelegationKeySignedSas(BlockBlobClient blobClient,
+                                                     BlobServiceSasSignatureValues serviceSasSignatureValues,
+                                                     OffsetDateTime expiryTime) {
+
+        BlobServiceClient blobServiceClient = new BlobServiceClientBuilder()
+                .endpoint(String.format("https://%s.%s", accountName, DEFAULT_ENDPOINT_SUFFIX))
+                .credential(getClientSecretCredential())
+                .buildClient();
+        // back-date the key validity to guard against clock skew, matching the 900s offset used previously
+        OffsetDateTime startTime = OffsetDateTime.now(ZoneOffset.UTC).minusSeconds(900);
+        UserDelegationKey userDelegationKey = blobServiceClient.getUserDelegationKey(startTime, expiryTime);
+        return blobClient.generateUserDelegationSas(serviceSasSignatureValues, userDelegationKey);
     }
 
     private boolean authenticateViaServicePrincipal() {
@@ -313,34 +218,27 @@ private boolean authenticateViaServicePrincipal() {
                 StringUtils.isNoneBlank(accountName, tenantId, clientId, clientSecret);
     }
 
-    private class TokenRefresher implements Runnable {
-        @Override
-        public void run() {
-            try {
-                log.debug("Checking for azure access token expiry at: {}", LocalDateTime.now());
-                OffsetDateTime tokenExpiryThreshold = OffsetDateTime.now().plusMinutes(5);
-                if (accessToken.getExpiresAt() != null && accessToken.getExpiresAt().isBefore(tokenExpiryThreshold)) {
-                    log.info("Access token is about to expire (5 minutes or less) at: {}. 
New access token will be generated", - accessToken.getExpiresAt().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME)); - AccessToken newToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); - log.info("New azure access token generated at: {}", LocalDateTime.now()); - if (newToken == null || StringUtils.isBlank(newToken.getToken())) { - log.error("New access token is null or empty"); - return; - } - // update access token with newly generated token - accessToken = newToken; - storageCredentialsToken.updateToken(accessToken.getToken()); - } - } catch (Exception e) { - log.error("Error while acquiring new access token: ", e); - } - } + private ClientSecretCredential getClientSecretCredential() { + return new ClientSecretCredentialBuilder() + .clientId(clientId) + .clientSecret(clientSecret) + .tenantId(tenantId) + .build(); } - @Override - public void close() { - new ExecutorCloser(executorService).close(); - log.info("Refresh token executor service shutdown completed"); + @NotNull + private BlobContainerClient getBlobContainerFromServicePrincipals(String accountName, RequestRetryOptions retryOptions) { + ClientSecretCredential clientSecretCredential = getClientSecretCredential(); + return new BlobContainerClientBuilder() + .endpoint(String.format(String.format("https://%s.%s", accountName, DEFAULT_ENDPOINT_SUFFIX))) + .credential(clientSecretCredential) + .retryOptions(retryOptions) + .buildClient(); + } + + @NotNull + private String generateSas(BlockBlobClient blob, + BlobServiceSasSignatureValues blobServiceSasSignatureValues) { + return blob.generateSas(blobServiceSasSignatureValues, null); } -} +} \ No newline at end of file diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java index edbb9812699..a8552ac5b1b 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java @@ -18,15 +18,44 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import static java.lang.Thread.currentThread; +import com.azure.core.http.rest.Response; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobContainerProperties; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobProperties; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.Block; +import com.azure.storage.blob.models.BlockBlobItem; +import com.azure.storage.blob.models.BlockListType; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlobUploadFromFileOptions; +import com.azure.storage.blob.sas.BlobSasPermission; +import com.azure.storage.blob.specialized.BlockBlobClient; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.microsoft.azure.storage.RetryPolicy; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.core.data.DataStoreException; +import 
org.apache.jackrabbit.guava.common.base.Strings;
+import org.apache.jackrabbit.guava.common.cache.Cache;
+import org.apache.jackrabbit.guava.common.cache.CacheBuilder;
+import org.apache.jackrabbit.guava.common.collect.AbstractIterator;
+import org.apache.jackrabbit.guava.common.collect.Lists;
+import org.apache.jackrabbit.guava.common.collect.Maps;
+import org.apache.jackrabbit.oak.commons.PropertiesUtil;
+import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord;
+import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend;
+import org.apache.jackrabbit.util.Base64;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
+import java.io.*;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLEncoder;
@@ -38,7 +67,6 @@
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -48,72 +76,39 @@
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
+import java.util.stream.Collectors;
 
-import org.apache.jackrabbit.guava.common.base.Strings;
-import org.apache.jackrabbit.guava.common.cache.Cache;
-import org.apache.jackrabbit.guava.common.cache.CacheBuilder;
-import org.apache.jackrabbit.guava.common.collect.AbstractIterator;
-import com.microsoft.azure.storage.AccessCondition;
-import com.microsoft.azure.storage.LocationMode;
-import com.microsoft.azure.storage.ResultContinuation;
-import com.microsoft.azure.storage.ResultSegment;
-import com.microsoft.azure.storage.RetryPolicy;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.BlobListingDetails;
-import com.microsoft.azure.storage.blob.BlobRequestOptions;
-import com.microsoft.azure.storage.blob.BlockEntry;
-import com.microsoft.azure.storage.blob.BlockListingFilter;
-import com.microsoft.azure.storage.blob.CloudBlob;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlobDirectory;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-import com.microsoft.azure.storage.blob.CopyStatus;
-import com.microsoft.azure.storage.blob.ListBlobItem;
-import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders;
-import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
-import org.apache.commons.io.IOUtils;
-import org.apache.jackrabbit.core.data.DataIdentifier;
-import org.apache.jackrabbit.core.data.DataRecord;
-import org.apache.jackrabbit.core.data.DataStoreException;
-import org.apache.jackrabbit.oak.commons.PropertiesUtil;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadToken;
-import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord;
-import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend;
-import org.apache.jackrabbit.util.Base64;
-import org.jetbrains.annotations.NotNull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory; -public class AzureBlobStoreBackend extends AbstractSharedBackend { +import static java.lang.Thread.currentThread; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.BUFFERED_STREAM_THRESHOLD; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.DEFAULT_CONCURRENT_REQUEST_COUNT; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.LAST_MODIFIED_KEY; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_ALLOWABLE_UPLOAD_URIS; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_BINARY_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_CONCURRENT_REQUEST_COUNT; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_MULTIPART_UPLOAD_PART_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_SINGLE_PUT_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.META_DIR_NAME; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.META_KEY_PREFIX; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MIN_MULTIPART_UPLOAD_PART_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.REF_KEY; + + +public class AzureBlobStoreBackend extends AbstractAzureBlobStoreBackend { private static final Logger LOG = LoggerFactory.getLogger(AzureBlobStoreBackend.class); private static final Logger LOG_STREAMS_DOWNLOAD = LoggerFactory.getLogger("oak.datastore.download.streams"); private static final Logger LOG_STREAMS_UPLOAD = LoggerFactory.getLogger("oak.datastore.upload.streams"); - private static final String META_DIR_NAME = "META"; - private static final String META_KEY_PREFIX = META_DIR_NAME + "/"; - - private static final String REF_KEY = "reference.key"; - private static final String LAST_MODIFIED_KEY = "lastModified"; - - private static final long BUFFERED_STREAM_THRESHOLD = 1024 * 1024; - static final long MIN_MULTIPART_UPLOAD_PART_SIZE = 1024 * 1024 * 10; // 10MB - static final long MAX_MULTIPART_UPLOAD_PART_SIZE = 1024 * 1024 * 100; // 100MB - static final long MAX_SINGLE_PUT_UPLOAD_SIZE = 1024 * 1024 * 256; // 256MB, Azure limit - static final long MAX_BINARY_UPLOAD_SIZE = (long) Math.floor(1024L * 1024L * 1024L * 1024L * 4.75); // 4.75TB, Azure limit - private static final int MAX_ALLOWABLE_UPLOAD_URIS = 50000; // Azure limit - private static final int MAX_UNIQUE_RECORD_TRIES = 10; - private static final int DEFAULT_CONCURRENT_REQUEST_COUNT = 2; - private static final int MAX_CONCURRENT_REQUEST_COUNT = 50; - private Properties properties; private AzureBlobContainerProvider azureBlobContainerProvider; private int concurrentRequestCount = DEFAULT_CONCURRENT_REQUEST_COUNT; - private RetryPolicy retryPolicy; + private RequestRetryOptions retryOptions; private Integer requestTimeout; private int httpDownloadURIExpirySeconds = 0; // disabled by default private int httpUploadURIExpirySeconds = 0; // disabled by default @@ -121,7 +116,6 @@ public class AzureBlobStoreBackend extends AbstractSharedBackend { private String downloadDomainOverride = null; private boolean createBlobContainer = true; private boolean presignedDownloadURIVerifyExists = true; - private boolean enableSecondaryLocation = AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_DEFAULT; private Cache httpDownloadURICache; @@ -130,36 +124,19 @@ 
public class AzureBlobStoreBackend extends AbstractSharedBackend { public void setProperties(final Properties properties) { this.properties = properties; } + private volatile BlobContainerClient azureContainer = null; - private volatile CloudBlobContainer azureContainer = null; - - protected CloudBlobContainer getAzureContainer() throws DataStoreException { + protected BlobContainerClient getAzureContainer() throws DataStoreException { if (azureContainer == null) { synchronized (this) { if (azureContainer == null) { - azureContainer = azureBlobContainerProvider.getBlobContainer(getBlobRequestOptions()); + azureContainer = azureBlobContainerProvider.getBlobContainer(retryOptions, properties); } } } return azureContainer; } - @NotNull - protected BlobRequestOptions getBlobRequestOptions() { - BlobRequestOptions requestOptions = new BlobRequestOptions(); - if (null != retryPolicy) { - requestOptions.setRetryPolicyFactory(retryPolicy); - } - if (null != requestTimeout) { - requestOptions.setTimeoutIntervalInMs(requestTimeout); - } - requestOptions.setConcurrentRequestCount(concurrentRequestCount); - if (enableSecondaryLocation) { - requestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); - } - return requestOptions; - } - @Override public void init() throws DataStoreException { ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); @@ -171,16 +148,14 @@ public void init() throws DataStoreException { if (null == properties) { try { properties = Utils.readConfig(Utils.DEFAULT_CONFIG_FILE); - } - catch (IOException e) { + } catch (IOException e) { throw new DataStoreException("Unable to initialize Azure Data Store from " + Utils.DEFAULT_CONFIG_FILE, e); } } try { - Utils.setProxyIfNeeded(properties); createBlobContainer = PropertiesUtil.toBoolean( - Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_CREATE_CONTAINER)), true); + Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_CREATE_CONTAINER)), true); initAzureDSConfig(); concurrentRequestCount = PropertiesUtil.toInteger( @@ -199,19 +174,18 @@ public void init() throws DataStoreException { } LOG.info("Using concurrentRequestsPerOperation={}", concurrentRequestCount); - retryPolicy = Utils.getRetryPolicy(properties.getProperty(AzureConstants.AZURE_BLOB_MAX_REQUEST_RETRY)); if (properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT) != null) { requestTimeout = PropertiesUtil.toInteger(properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT), RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT); } + + retryOptions = Utils.getRetryOptions(properties.getProperty(AzureConstants.AZURE_BLOB_MAX_REQUEST_RETRY), requestTimeout, computeSecondaryLocationEndpoint()); + presignedDownloadURIVerifyExists = PropertiesUtil.toBoolean( - Strings.emptyToNull(properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_VERIFY_EXISTS)), true); + Strings.emptyToNull(properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_VERIFY_EXISTS)), true); - enableSecondaryLocation = PropertiesUtil.toBoolean( - properties.getProperty(AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_NAME), - AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_DEFAULT - ); - CloudBlobContainer azureContainer = getAzureContainer(); + + BlobContainerClient azureContainer = getAzureContainer(); if (createBlobContainer && !azureContainer.exists()) { azureContainer.create(); @@ -232,8 +206,7 @@ public void init() throws DataStoreException { String cacheMaxSize = 
properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_CACHE_MAX_SIZE);
             if (null != cacheMaxSize) {
                 this.setHttpDownloadURICacheSize(Integer.parseInt(cacheMaxSize));
-            }
-            else {
+            } else {
                 this.setHttpDownloadURICacheSize(0); // default
             }
         }
@@ -242,17 +215,15 @@ public void init() throws DataStoreException {
 
             // Initialize reference key secret
             boolean createRefSecretOnInit = PropertiesUtil.toBoolean(
-                Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_REF_ON_INIT)), true);
+                    Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_REF_ON_INIT)), true);
 
             if (createRefSecretOnInit) {
                 getOrCreateReferenceKey();
             }
-        }
-        catch (StorageException e) {
+        } catch (BlobStorageException e) {
             throw new DataStoreException(e);
-        }
-        finally {
+        } finally {
             Thread.currentThread().setContextClassLoader(contextClassLoader);
         }
     }
@@ -280,7 +251,7 @@ public InputStream read(DataIdentifier identifier) throws DataStoreException {
         try {
             Thread.currentThread().setContextClassLoader(
                     getClass().getClassLoader());
-            CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+            BlockBlobClient blob = getAzureContainer().getBlobClient(key).getBlockBlobClient();
             if (!blob.exists()) {
                 throw new DataStoreException(String.format("Trying to read missing blob. identifier=%s", key));
             }
@@ -288,18 +259,13 @@ public InputStream read(DataIdentifier identifier) throws DataStoreException {
             InputStream is = blob.openInputStream();
             LOG.debug("Got input stream for blob. identifier={} duration={}", key, (System.currentTimeMillis() - start));
             if (LOG_STREAMS_DOWNLOAD.isDebugEnabled()) {
-                // Log message, with exception so we can get a trace to see where the call came from
+                // Log message, with exception, so we can get a trace to see where the call came from
                 LOG_STREAMS_DOWNLOAD.debug("Binary downloaded from Azure Blob Storage - identifier={}", key, new Exception());
             }
             return is;
-        }
-        catch (StorageException e) {
+        } catch (BlobStorageException e) {
             LOG.info("Error reading blob. identifier={}", key);
             throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e);
-        }
-        catch (URISyntaxException e) {
-            LOG.debug("Error reading blob. identifier={}", key);
-            throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e);
         } finally {
             if (contextClassLoader != null) {
                 Thread.currentThread().setContextClassLoader(contextClassLoader);
             }
         }
     }
 
+    private void uploadBlob(BlockBlobClient client, File file, long len, long start, String key) throws IOException {
+        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+                .setBlockSizeLong(len)
+                .setMaxConcurrency(concurrentRequestCount)
+                .setMaxSingleUploadSizeLong(MAX_SINGLE_PUT_UPLOAD_SIZE);
+        // uploadFromFileWithResponse streams the file itself, so no InputStream needs to be opened here
+        BlobUploadFromFileOptions options = new BlobUploadFromFileOptions(file.toString());
+        options.setParallelTransferOptions(parallelTransferOptions);
+        // write the lastModified metadata together with the upload; the blob does not exist before it
+        options.setMetadata(Map.of(LAST_MODIFIED_KEY, String.valueOf(System.currentTimeMillis())));
+        try {
+            BlobClient blobClient = client.getContainerClient().getBlobClient(key);
+            Response<BlockBlobItem> response = blobClient.uploadFromFileWithResponse(options, null, null);
+            LOG.debug("Upload status is {} for blob {}", response.getStatusCode(), key);
+        } catch (UncheckedIOException ex) {
+            LOG.error("Failed to upload blob from file. identifier={}", key, ex);
+            throw ex.getCause();
+        }
+        LOG.debug("Blob created. identifier={} length={} duration={}", key, len, (System.currentTimeMillis() - start));
+        if (LOG_STREAMS_UPLOAD.isDebugEnabled()) {
+            // Log message, with exception, so we can get a trace to see where the call came from
+            LOG_STREAMS_UPLOAD.debug("Binary uploaded to Azure Blob Storage - identifier={}", key, new Exception());
+        }
+    }
+
     @Override
     public void write(DataIdentifier identifier, File file) throws DataStoreException {
-        if (null == identifier) {
+        if (identifier == null) {
             throw new NullPointerException("identifier");
         }
-        if (null == file) {
+        if (file == null) {
             throw new NullPointerException("file");
         }
         String key = getKeyName(identifier);
@@ -324,46 +318,28 @@ public void write(DataIdentifier identifier, File file) throws DataStoreExceptio
             long len = file.length();
             LOG.debug("Blob write started. identifier={} length={}", key, len);
-            CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+            BlockBlobClient blob = getAzureContainer().getBlobClient(key).getBlockBlobClient();
             if (!blob.exists()) {
-                addLastModified(blob);
-
-                BlobRequestOptions options = new BlobRequestOptions();
-                options.setConcurrentRequestCount(concurrentRequestCount);
-                boolean useBufferedStream = len < BUFFERED_STREAM_THRESHOLD;
-                final InputStream in = useBufferedStream ? new BufferedInputStream(new FileInputStream(file)) : new FileInputStream(file);
-                try {
-                    blob.upload(in, len, null, options, null);
-                    LOG.debug("Blob created. identifier={} length={} duration={} buffered={}", key, len, (System.currentTimeMillis() - start), useBufferedStream);
-                    if (LOG_STREAMS_UPLOAD.isDebugEnabled()) {
-                        // Log message, with exception so we can get a trace to see where the call came from
-                        LOG_STREAMS_UPLOAD.debug("Binary uploaded to Azure Blob Storage - identifier={}", key, new Exception());
-                    }
-                } finally {
-                    in.close();
-                }
+                uploadBlob(blob, file, len, start, key);
                 return;
             }
-            blob.downloadAttributes();
-            if (blob.getProperties().getLength() != len) {
+            if (blob.getProperties().getBlobSize() != len) {
                 throw new DataStoreException("Length Collision. identifier=" + key +
-                                             " new length=" + len +
-                                             " old length=" + blob.getProperties().getLength());
+                        " new length=" + len +
+                        " old length=" + blob.getProperties().getBlobSize());
             }
             LOG.trace("Blob already exists. identifier={} lastModified={}", key, getLastModified(blob));
-            addLastModified(blob);
-            blob.uploadMetadata();
+            updateLastModifiedMetadata(blob);
 
             LOG.debug("Blob updated. 
identifier={} lastModified={} duration={}", key, - getLastModified(blob), (System.currentTimeMillis() - start)); - } - catch (StorageException e) { + getLastModified(blob), (System.currentTimeMillis() - start)); + } catch (BlobStorageException e) { LOG.info("Error writing blob. identifier={}", key, e); throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e); - } - catch (URISyntaxException | IOException e) { + } catch (IOException e) { LOG.debug("Error writing blob. identifier={}", key, e); throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e); } finally { @@ -373,58 +349,6 @@ public void write(DataIdentifier identifier, File file) throws DataStoreExceptio } } - private static boolean waitForCopy(CloudBlob blob) throws StorageException, InterruptedException { - boolean continueLoop = true; - CopyStatus status = CopyStatus.PENDING; - while (continueLoop) { - blob.downloadAttributes(); - status = blob.getCopyState().getStatus(); - continueLoop = status == CopyStatus.PENDING; - // Sleep if retry is needed - if (continueLoop) { - Thread.sleep(500); - } - } - return status == CopyStatus.SUCCESS; - } - - @Override - public byte[] getOrCreateReferenceKey() throws DataStoreException { - try { - if (secret != null && secret.length != 0) { - return secret; - } else { - byte[] key; - // Try reading from the metadata folder if it exists - key = readMetadataBytes(REF_KEY); - if (key == null) { - key = super.getOrCreateReferenceKey(); - addMetadataRecord(new ByteArrayInputStream(key), REF_KEY); - key = readMetadataBytes(REF_KEY); - } - secret = key; - return secret; - } - } catch (IOException e) { - throw new DataStoreException("Unable to get or create key " + e); - } - } - - private byte[] readMetadataBytes(String name) throws IOException, DataStoreException { - DataRecord rec = getMetadataRecord(name); - byte[] key = null; - if (rec != null) { - InputStream stream = null; - try { - stream = rec.getStream(); - return IOUtils.toByteArray(stream); - } finally { - IOUtils.closeQuietly(stream); - } - } - return key; - } - @Override public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException { if (null == identifier) { @@ -436,30 +360,23 @@ public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key); - blob.downloadAttributes(); + BlockBlobClient blob = getAzureContainer().getBlobClient(key).getBlockBlobClient(); AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord( this, azureBlobContainerProvider, - new DataIdentifier(getIdentifierName(blob.getName())), + new DataIdentifier(getIdentifierName(blob.getBlobName())), getLastModified(blob), - blob.getProperties().getLength()); + blob.getProperties().getBlobSize()); LOG.debug("Data record read for blob. identifier={} duration={} record={}", - key, (System.currentTimeMillis() - start), record); + key, (System.currentTimeMillis() - start), record); return record; - } - catch (StorageException e) { - if (404 == e.getHttpStatusCode()) { + } catch (BlobStorageException e) { + if (e.getStatusCode() == 404) { LOG.debug("Unable to get record for blob; blob does not exist. identifier={}", key); - } - else { + } else { LOG.info("Error getting data record for blob. identifier={}", key, e); } throw new DataStoreException(String.format("Cannot retrieve blob. 
identifier=%s", key), e); - } - catch (URISyntaxException e) { - LOG.debug("Error getting data record for blob. identifier={}", key, e); - throw new DataStoreException(String.format("Cannot retrieve blob. identifier=%s", key), e); } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); @@ -468,15 +385,13 @@ public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException } @Override - public Iterator getAllIdentifiers() { + public Iterator getAllIdentifiers() throws DataStoreException { return new RecordsIterator<>( - input -> new DataIdentifier(getIdentifierName(input.getName()))); + input -> new DataIdentifier(getIdentifierName(input.getName())), getAzureContainer()); } - - @Override - public Iterator getAllRecords() { + public Iterator getAllRecords() throws DataStoreException { final AbstractSharedBackend backend = this; return new RecordsIterator<>( input -> new AzureBlobStoreDataRecord( @@ -484,7 +399,7 @@ public Iterator getAllRecords() { azureBlobContainerProvider, new DataIdentifier(getIdentifierName(input.getName())), input.getLastModified(), - input.getLength()) + input.getLength()), getAzureContainer() ); } @@ -496,14 +411,12 @@ public boolean exists(DataIdentifier identifier) throws DataStoreException { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - boolean exists =getAzureContainer().getBlockBlobReference(key).exists(); + boolean exists = getAzureContainer().getBlobClient(key).getBlockBlobClient().exists(); LOG.debug("Blob exists={} identifier={} duration={}", exists, key, (System.currentTimeMillis() - start)); return exists; - } - catch (Exception e) { + } catch (Exception e) { throw new DataStoreException(e); - } - finally { + } finally { if (null != contextClassLoader) { Thread.currentThread().setContextClassLoader(contextClassLoader); } @@ -511,7 +424,7 @@ public boolean exists(DataIdentifier identifier) throws DataStoreException { } @Override - public void close() throws DataStoreException { + public void close(){ azureBlobContainerProvider.close(); LOG.info("AzureBlobBackend closed."); } @@ -526,17 +439,13 @@ public void deleteRecord(DataIdentifier identifier) throws DataStoreException { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - boolean result = getAzureContainer().getBlockBlobReference(key).deleteIfExists(); + boolean result = getAzureContainer().getBlobClient(key).getBlockBlobClient().deleteIfExists(); LOG.debug("Blob {}. identifier={} duration={}", result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)", key, (System.currentTimeMillis() - start)); - } - catch (StorageException e) { + } catch (BlobStorageException e) { LOG.info("Error deleting blob. identifier={}", key, e); throw new DataStoreException(e); - } - catch (URISyntaxException e) { - throw new DataStoreException(e); } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); @@ -557,7 +466,7 @@ public void addMetadataRecord(InputStream input, String name) throws DataStoreEx try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - addMetadataRecordImpl(input, name, -1L); + addMetadataRecordImpl(input, name, -1); LOG.debug("Metadata record added. 
metadataName={} duration={}", name, (System.currentTimeMillis() - start)); } finally { @@ -569,7 +478,7 @@ public void addMetadataRecord(InputStream input, String name) throws DataStoreEx @Override public void addMetadataRecord(File input, String name) throws DataStoreException { - if (null == input) { + if (input == null) { throw new NullPointerException("input"); } if (Strings.isNullOrEmpty(name)) { @@ -582,30 +491,29 @@ public void addMetadataRecord(File input, String name) throws DataStoreException addMetadataRecordImpl(new FileInputStream(input), name, input.length()); LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start)); - } - catch (FileNotFoundException e) { + } catch (FileNotFoundException e) { throw new DataStoreException(e); - } - finally { + } finally { if (null != contextClassLoader) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } } + private BlockBlobClient getMetaBlobClient(String name) throws DataStoreException { + return getAzureContainer().getBlobClient(META_DIR_NAME + "/" + name).getBlockBlobClient(); + } + private void addMetadataRecordImpl(final InputStream input, String name, long recordLength) throws DataStoreException { try { - CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); - CloudBlockBlob blob = metaDir.getBlockBlobReference(name); - addLastModified(blob); - blob.upload(input, recordLength); - } - catch (StorageException e) { + BlockBlobClient blockBlobClient = getMetaBlobClient(name); + updateLastModifiedMetadata(blockBlobClient); + blockBlobClient.upload(BinaryData.fromBytes(input.readAllBytes())); + } catch (BlobStorageException e) { LOG.info("Error adding metadata record. metadataName={} length={}", name, recordLength, e); throw new DataStoreException(e); - } - catch (URISyntaxException | IOException e) { - throw new DataStoreException(e); + } catch (IOException e) { + throw new RuntimeException(e); } } @@ -616,15 +524,14 @@ public DataRecord getMetadataRecord(String name) { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); - CloudBlockBlob blob = metaDir.getBlockBlobReference(name); - if (!blob.exists()) { + BlockBlobClient blockBlobClient = getMetaBlobClient(name); + if (!blockBlobClient.exists()) { LOG.warn("Trying to read missing metadata. metadataName={}", name); return null; } - blob.downloadAttributes(); - long lastModified = getLastModified(blob); - long length = blob.getProperties().getLength(); + + long lastModified = getLastModified(blockBlobClient); + long length = blockBlobClient.getProperties().getBlobSize(); AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord(this, azureBlobContainerProvider, new DataIdentifier(name), @@ -633,8 +540,7 @@ public DataRecord getMetadataRecord(String name) { true); LOG.debug("Metadata record read. metadataName={} duration={} record={}", name, (System.currentTimeMillis() - start), record); return record; - - } catch (StorageException e) { + } catch (BlobStorageException e) { LOG.info("Error reading metadata record. 
metadataName={}", name, e); throw new RuntimeException(e); } catch (Exception e) { @@ -658,29 +564,26 @@ public List getAllMetadataRecords(String prefix) { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); - for (ListBlobItem item : metaDir.listBlobs(prefix)) { - if (item instanceof CloudBlob) { - CloudBlob blob = (CloudBlob) item; - blob.downloadAttributes(); - records.add(new AzureBlobStoreDataRecord( - this, - azureBlobContainerProvider, - new DataIdentifier(stripMetaKeyPrefix(blob.getName())), - getLastModified(blob), - blob.getProperties().getLength(), - true)); - } + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(META_DIR_NAME); + + for (BlobItem blobItem : getAzureContainer().listBlobs(listBlobsOptions, null)) { + BlobClient blobClient = getAzureContainer().getBlobClient(blobItem.getName()); + BlobProperties properties = blobClient.getProperties(); + + records.add(new AzureBlobStoreDataRecord(this, + azureBlobContainerProvider, + new DataIdentifier(stripMetaKeyPrefix(blobClient.getBlobName())), + getLastModified(blobClient.getBlockBlobClient()), + properties.getBlobSize(), + true)); } LOG.debug("Metadata records read. recordsRead={} metadataFolder={} duration={}", records.size(), prefix, (System.currentTimeMillis() - start)); - } - catch (StorageException e) { + } catch (BlobStorageException e) { LOG.info("Error reading all metadata records. metadataFolder={}", prefix, e); - } - catch (DataStoreException | URISyntaxException e) { + } catch (DataStoreException e) { LOG.debug("Error reading all metadata records. metadataFolder={}", prefix, e); - } - finally { + } finally { if (null != contextClassLoader) { Thread.currentThread().setContextClassLoader(contextClassLoader); } @@ -695,21 +598,17 @@ public boolean deleteMetadataRecord(String name) { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name)); + BlobClient blob = getAzureContainer().getBlobClient(addMetaKeyPrefix(name)); boolean result = blob.deleteIfExists(); LOG.debug("Metadata record {}. metadataName={} duration={}", result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)", name, (System.currentTimeMillis() - start)); return result; - - } - catch (StorageException e) { + } catch (BlobStorageException e) { LOG.info("Error deleting metadata record. metadataName={}", name, e); - } - catch (DataStoreException | URISyntaxException e) { + } catch (DataStoreException e) { LOG.debug("Error deleting metadata record. 
metadataName={}", name, e); - } - finally { + } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } @@ -727,26 +626,25 @@ public void deleteAllMetadataRecords(String prefix) { try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); int total = 0; - for (ListBlobItem item : metaDir.listBlobs(prefix)) { - if (item instanceof CloudBlob) { - if (((CloudBlob)item).deleteIfExists()) { - total++; - } + + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(META_DIR_NAME); + + for (BlobItem blobItem : getAzureContainer().listBlobs(listBlobsOptions, null)) { + BlobClient blobClient = getAzureContainer().getBlobClient(blobItem.getName()); + if (blobClient.deleteIfExists()) { + total++; } } LOG.debug("Metadata records deleted. recordsDeleted={} metadataFolder={} duration={}", total, prefix, (System.currentTimeMillis() - start)); - } - catch (StorageException e) { + } catch (BlobStorageException e) { LOG.info("Error deleting all metadata records. metadataFolder={}", prefix, e); - } - catch (DataStoreException | URISyntaxException e) { + } catch (DataStoreException e) { LOG.debug("Error deleting all metadata records. metadataFolder={}", prefix, e); - } - finally { + } finally { if (null != contextClassLoader) { Thread.currentThread().setContextClassLoader(contextClassLoader); } @@ -759,15 +657,13 @@ public boolean metadataRecordExists(String name) { ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); - CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name)); + BlobClient blob = getAzureContainer().getBlobClient(addMetaKeyPrefix(name)); boolean exists = blob.exists(); LOG.debug("Metadata record {} exists {}. duration={}", name, exists, (System.currentTimeMillis() - start)); return exists; - } - catch (DataStoreException | StorageException | URISyntaxException e) { + } catch (DataStoreException | BlobStorageException e) { LOG.debug("Error checking existence of metadata record = {}", name, e); - } - finally { + } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } @@ -775,7 +671,6 @@ public boolean metadataRecordExists(String name) { return false; } - /** * Get key from data identifier. Object is stored with key in ADS. 
*/
@@ -807,22 +702,26 @@ private static String stripMetaKeyPrefix(String name) {
         return name;
     }
 
-    private static void addLastModified(CloudBlockBlob blob) {
-        blob.getMetadata().put(LAST_MODIFIED_KEY, String.valueOf(System.currentTimeMillis()));
+    private static void updateLastModifiedMetadata(BlockBlobClient blockBlobClient) {
+        // the lastModified entry is per blob, so write it to the blob's own metadata, not the container's
+        Map<String, String> metadata = new HashMap<>(blockBlobClient.getProperties().getMetadata());
+        metadata.put(LAST_MODIFIED_KEY, String.valueOf(System.currentTimeMillis()));
+        blockBlobClient.setMetadata(metadata);
     }
 
-    private static long getLastModified(CloudBlob blob) {
-        if (blob.getMetadata().containsKey(LAST_MODIFIED_KEY)) {
-            return Long.parseLong(blob.getMetadata().get(LAST_MODIFIED_KEY));
+    private static long getLastModified(BlockBlobClient blobClient) {
+        BlobProperties blobProperties = blobClient.getProperties();
+        if (blobProperties.getMetadata().containsKey(LAST_MODIFIED_KEY)) {
+            return Long.parseLong(blobProperties.getMetadata().get(LAST_MODIFIED_KEY));
         }
-        return blob.getProperties().getLastModified().getTime();
+        return blobProperties.getLastModified().toInstant().toEpochMilli();
     }
 
-    void setHttpDownloadURIExpirySeconds(int seconds) {
+    protected void setHttpDownloadURIExpirySeconds(int seconds) {
         httpDownloadURIExpirySeconds = seconds;
     }
 
-    void setHttpDownloadURICacheSize(int maxSize) {
+    protected void setHttpDownloadURICacheSize(int maxSize) {
         // max size 0 or smaller is used to turn off the cache
         if (maxSize > 0) {
             LOG.info("presigned GET URI cache enabled, maxSize = {} items, expiry = {} seconds", maxSize, httpDownloadURIExpirySeconds / 2);
@@ -836,13 +735,13 @@ void setHttpDownloadURICacheSize(int maxSize) {
         }
     }
 
-    URI createHttpDownloadURI(@NotNull DataIdentifier identifier,
-                              @NotNull DataRecordDownloadOptions downloadOptions) {
+    protected URI createHttpDownloadURI(@NotNull DataIdentifier identifier,
+                                        @NotNull DataRecordDownloadOptions downloadOptions) {
         URI uri = null;
 
         // When running unit test from Maven, it doesn't always honor the @NotNull decorators
-        if (null == identifier) throw new NullPointerException("identifier");
-        if (null == downloadOptions) throw new NullPointerException("downloadOptions");
+        if (identifier == null) throw new NullPointerException("identifier");
+        if (downloadOptions == null) throw new NullPointerException("downloadOptions");
 
         if (httpDownloadURIExpirySeconds > 0) {
@@ -874,24 +773,10 @@ URI createHttpDownloadURI(@NotNull DataIdentifier identifier,
             }
 
             String key = getKeyName(identifier);
-            SharedAccessBlobHeaders headers = new SharedAccessBlobHeaders();
-            headers.setCacheControl(String.format("private, max-age=%d, immutable", httpDownloadURIExpirySeconds));
-
-            String contentType = downloadOptions.getContentTypeHeader();
-            if (! Strings.isNullOrEmpty(contentType)) {
-                headers.setContentType(contentType);
-            }
-
-            String contentDisposition =
-                    downloadOptions.getContentDispositionHeader();
-            if (! 
Strings.isNullOrEmpty(contentDisposition)) { - headers.setContentDisposition(contentDisposition); - } uri = createPresignedURI(key, - EnumSet.of(SharedAccessBlobPermissions.READ), + new BlobSasPermission().setReadPermission(true), httpDownloadURIExpirySeconds, - headers, domain); if (uri != null && httpDownloadURICache != null) { httpDownloadURICache.put(cacheKey, uri); @@ -901,40 +786,39 @@ URI createHttpDownloadURI(@NotNull DataIdentifier identifier, return uri; } - void setHttpUploadURIExpirySeconds(int seconds) { httpUploadURIExpirySeconds = seconds; } + protected void setHttpUploadURIExpirySeconds(int seconds) { + httpUploadURIExpirySeconds = seconds; + } private DataIdentifier generateSafeRandomIdentifier() { return new DataIdentifier( String.format("%s-%d", - UUID.randomUUID().toString(), + UUID.randomUUID(), Instant.now().toEpochMilli() ) ); } - DataRecordUpload initiateHttpUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final DataRecordUploadOptions options) { - List uploadPartURIs = new ArrayList<>(); + + protected DataRecordUpload initiateHttpUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final DataRecordUploadOptions options) { + List uploadPartURIs = Lists.newArrayList(); long minPartSize = MIN_MULTIPART_UPLOAD_PART_SIZE; long maxPartSize = MAX_MULTIPART_UPLOAD_PART_SIZE; if (0L >= maxUploadSizeInBytes) { throw new IllegalArgumentException("maxUploadSizeInBytes must be > 0"); - } - else if (0 == maxNumberOfURIs) { + } else if (0 == maxNumberOfURIs) { throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1"); - } - else if (-1 > maxNumberOfURIs) { + } else if (-1 > maxNumberOfURIs) { throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1"); - } - else if (maxUploadSizeInBytes > MAX_SINGLE_PUT_UPLOAD_SIZE && + } else if (maxUploadSizeInBytes > MAX_SINGLE_PUT_UPLOAD_SIZE && maxNumberOfURIs == 1) { throw new IllegalArgumentException( String.format("Cannot do single-put upload with file size %d - exceeds max single-put upload size of %d", maxUploadSizeInBytes, MAX_SINGLE_PUT_UPLOAD_SIZE) ); - } - else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) { + } else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) { throw new IllegalArgumentException( String.format("Cannot do upload with file size %d - exceeds max upload size of %d", maxUploadSizeInBytes, @@ -981,8 +865,7 @@ else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) { String.format("Cannot do multi-part upload with requested part size %d", requestedPartSize) ); } - } - else { + } else { long maximalNumParts = (long) Math.ceil(((double) maxUploadSizeInBytes) / ((double) MIN_MULTIPART_UPLOAD_PART_SIZE)); numParts = Math.min(maximalNumParts, MAX_ALLOWABLE_UPLOAD_URIS); } @@ -993,8 +876,9 @@ else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) { throw new NullPointerException("Could not determine domain for direct upload"); } - EnumSet perms = EnumSet.of(SharedAccessBlobPermissions.WRITE); - Map presignedURIRequestParams = new HashMap<>(); + BlobSasPermission perms = new BlobSasPermission() + .setWritePermission(true); + Map presignedURIRequestParams = Maps.newHashMap(); // see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#uri-parameters presignedURIRequestParams.put("comp", "block"); for (long blockId = 1; blockId <= numParts; ++blockId) { @@ -1043,7 +927,18 @@ public Collection getUploadURIs() { return null; } - DataRecord completeHttpUpload(@NotNull String uploadTokenStr) + private Long 
- DataRecord completeHttpUpload(@NotNull String uploadTokenStr) + private Long getUncommittedBlocksListSize(BlockBlobClient client) throws DataStoreException { + List<Block> blocks = client.listBlocks(BlockListType.UNCOMMITTED).getUncommittedBlocks(); + client.commitBlockList(blocks.stream().map(Block::getName).collect(Collectors.toList())); + updateLastModifiedMetadata(client); + long size = 0L; + for (Block block : blocks) { + size += block.getSize(); + } + return size; + } + + protected DataRecord completeHttpUpload(@NotNull String uploadTokenStr) throws DataRecordUploadException, DataStoreException { if (Strings.isNullOrEmpty(uploadTokenStr)) { @@ -1060,32 +955,19 @@ record = getRecord(blobId); // If this succeeds this means either it was a "single put" upload // (we don't need to do anything in this case - blob is already uploaded) // or it was completed before with the same token. - } - catch (DataStoreException e1) { + } catch (DataStoreException e1) { // record doesn't exist - so this means we are safe to do the complete request try { if (uploadToken.getUploadId().isPresent()) { - CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key); - // An existing upload ID means this is a multi-part upload - List<BlockEntry> blocks = blob.downloadBlockList( - BlockListingFilter.UNCOMMITTED, - AccessCondition.generateEmptyCondition(), - null, - null); - addLastModified(blob); - blob.commitBlockList(blocks); - long size = 0L; - for (BlockEntry block : blocks) { - size += block.getSize(); - } + BlockBlobClient blockBlobClient = getAzureContainer().getBlobClient(key).getBlockBlobClient(); + long size = getUncommittedBlocksListSize(blockBlobClient); record = new AzureBlobStoreDataRecord( this, azureBlobContainerProvider, blobId, - getLastModified(blob), + getLastModified(blockBlobClient), size); - } - else { + } else { // Something is wrong - upload ID missing from upload token // but record doesn't exist already, so this is invalid throw new DataRecordUploadException( @@ -1093,7 +975,7 @@ record = new AzureBlobStoreDataRecord( blobId) ); } - } catch (URISyntaxException | StorageException e2) { + } catch (BlobStorageException e2) { throw new DataRecordUploadException( String.format("Unable to finalize direct write of binary %s", blobId), e2 @@ -1134,26 +1016,16 @@ private String getDirectUploadBlobStorageDomain(boolean ignoreDomainOverride) { } private URI createPresignedURI(String key, - EnumSet<SharedAccessBlobPermissions> permissions, - int expirySeconds, - SharedAccessBlobHeaders optionalHeaders, - String domain) { - return createPresignedURI(key, permissions, expirySeconds, new HashMap<>(), optionalHeaders, domain); - } - - private URI createPresignedURI(String key, - EnumSet<SharedAccessBlobPermissions> permissions, + BlobSasPermission blobSasPermissions, int expirySeconds, - Map<String, String> additionalQueryParams, String domain) { - return createPresignedURI(key, permissions, expirySeconds, additionalQueryParams, null, domain); + return createPresignedURI(key, blobSasPermissions, expirySeconds, Maps.newHashMap(), domain); }
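Note: the v12 generateSharedAccessSignature(retryOptions, key, permissions, expirySeconds, properties) called below lives in the reworked AzureBlobContainerProvider and is not part of this hunk. For orientation only, a minimal sketch of producing a read SAS against the v12 API, assuming an authenticated BlobClient named blobClient plus the java.time and com.azure.storage.blob.sas imports (a service-principal setup would instead call generateUserDelegationSas with a key from BlobServiceClient.getUserDelegationKey):

BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(
        OffsetDateTime.now().plusSeconds(expirySeconds),  // SAS expiry
        new BlobSasPermission().setReadPermission(true)); // GET-only token
String sas = blobClient.generateSas(values);              // returned already URL-encoded
URI presigned = URI.create(blobClient.getBlobUrl() + "?" + sas);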
private URI createPresignedURI(String key, - EnumSet<SharedAccessBlobPermissions> permissions, + BlobSasPermission blobSasPermissions, int expirySeconds, Map<String, String> additionalQueryParams, - SharedAccessBlobHeaders optionalHeaders, String domain) { if (Strings.isNullOrEmpty(domain)) { LOG.warn("Can't generate presigned URI - no Azure domain provided (is Azure account name configured?)"); @@ -1162,8 +1034,8 @@ private URI createPresignedURI(String key, URI presignedURI = null; try { - String sharedAccessSignature = azureBlobContainerProvider.generateSharedAccessSignature(getBlobRequestOptions(), key, - permissions, expirySeconds, optionalHeaders); + String sharedAccessSignature = azureBlobContainerProvider.generateSharedAccessSignature(retryOptions, key, + blobSasPermissions, expirySeconds, properties); // Shared access signature is returned encoded already. String uriString = String.format("https://%s/%s/%s?%s", @@ -1172,7 +1044,7 @@ private URI createPresignedURI(String key, key, sharedAccessSignature); - if (! additionalQueryParams.isEmpty()) { + if (!additionalQueryParams.isEmpty()) { StringBuilder builder = new StringBuilder(); for (Map.Entry<String, String> e : additionalQueryParams.entrySet()) { builder.append("&"); @@ -1184,21 +1056,18 @@ private URI createPresignedURI(String key, } presignedURI = new URI(uriString); - } - catch (DataStoreException e) { + } catch (DataStoreException e) { LOG.error("No connection to Azure Blob Storage", e); - } - catch (URISyntaxException | InvalidKeyException e) { + } catch (URISyntaxException | InvalidKeyException e) { LOG.error("Can't generate a presigned URI for key {}", key, e); - } - catch (StorageException e) { + } catch (BlobStorageException e) { LOG.error("Azure request to create presigned Azure Blob Storage {} URI failed. " + "Key: {}, Error: {}, HTTP Code: {}, Azure Error Code: {}", - permissions.contains(SharedAccessBlobPermissions.READ) ? "GET" : - (permissions.contains(SharedAccessBlobPermissions.WRITE) ? "PUT" : ""), + blobSasPermissions.hasReadPermission() ? "GET" : + (blobSasPermissions.hasWritePermission() ? "PUT" : ""), key, e.getMessage(), - e.getHttpStatusCode(), + e.getStatusCode(), e.getErrorCode()); } @@ -1228,25 +1097,25 @@ public long getLength() { return length; } - public static AzureBlobInfo fromCloudBlob(CloudBlob cloudBlob) throws StorageException { - cloudBlob.downloadAttributes(); - return new AzureBlobInfo(cloudBlob.getName(), + public static AzureBlobInfo fromCloudBlob(BlockBlobClient cloudBlob) throws BlobStorageException { + return new AzureBlobInfo(cloudBlob.getBlobName(), AzureBlobStoreBackend.getLastModified(cloudBlob), - cloudBlob.getProperties().getLength()); + cloudBlob.getProperties().getBlobSize()); } } private class RecordsIterator<T> extends AbstractIterator<T> { - // Seems to be thread-safe (in 5.0.0) - ResultContinuation resultContinuation; - boolean firstCall = true; - final Function<AzureBlobInfo, T> transformer; - final Queue<AzureBlobInfo> items = new LinkedList<>(); + private boolean firstCall = true; + private final Function<AzureBlobInfo, T> transformer; + private final Queue<AzureBlobInfo> items = Lists.newLinkedList(); + private final BlobContainerClient containerClient; - public RecordsIterator (Function<AzureBlobInfo, T> transformer) { + public RecordsIterator(Function<AzureBlobInfo, T> transformer, BlobContainerClient containerClient) { this.transformer = transformer; + this.containerClient = containerClient; }
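Note: the v8 continuation-token pager is gone; v12's listBlobs returns a PagedIterable that fetches continuation pages lazily while the caller just iterates. As written below, loadItems still pays one getProperties() round trip per blob inside fromCloudBlob; the listing can carry sizes and metadata inline instead. A hedged alternative sketch, assuming AzureBlobInfo keeps its (name, lastModified, length) constructor and ignoring the lastModified metadata override for brevity (BlobListDetails is com.azure.storage.blob.models):

ListBlobsOptions options = new ListBlobsOptions()
        .setDetails(new BlobListDetails().setRetrieveMetadata(true)); // sizes and metadata ride along with the listing
for (BlobItem blobItem : containerClient.listBlobs(options, null)) {  // PagedIterable pages transparently
    items.add(new AzureBlobInfo(blobItem.getName(),
            blobItem.getProperties().getLastModified().toInstant().toEpochMilli(),
            blobItem.getProperties().getContentLength()));            // no per-blob HTTP call
}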
containerName={}", containerClient.getBlobContainerName()); return false; } + firstCall = false; - ResultSegment results = container.listBlobsSegmented(null, false, EnumSet.noneOf(BlobListingDetails.class), null, resultContinuation, null, null); - resultContinuation = results.getContinuationToken(); - for (ListBlobItem item : results.getResults()) { - if (item instanceof CloudBlob) { - items.add(AzureBlobInfo.fromCloudBlob((CloudBlob)item)); - } - } + containerClient.listBlobs(new ListBlobsOptions(), null) + .forEach(blobItem -> items.add(AzureBlobInfo.fromCloudBlob(containerClient.getBlobClient(blobItem.getName()).getBlockBlobClient()))); LOG.debug("Container records batch read. batchSize={} containerName={} duration={}", - results.getLength(), getContainerName(), (System.currentTimeMillis() - start)); - return results.getLength() > 0; - } - catch (StorageException e) { - LOG.info("Error listing blobs. containerName={}", getContainerName(), e); - } - catch (DataStoreException e) { - LOG.debug("Cannot list blobs. containerName={}", getContainerName(), e); + items.size(), getContainerName(), (System.currentTimeMillis() - start)); + return !items.isEmpty(); } finally { if (contextClassLoader != null) { currentThread().setContextClassLoader(contextClassLoader); } } - return false; } } @@ -1323,20 +1180,20 @@ public long getLength() throws DataStoreException { @Override public InputStream getStream() throws DataStoreException { String id = getKeyName(getIdentifier()); - CloudBlobContainer container = azureBlobContainerProvider.getBlobContainer(); + BlobContainerClient container = azureBlobContainerProvider.getBlobContainer(); if (isMeta) { id = addMetaKeyPrefix(getIdentifier().toString()); } else { // Don't worry about stream logging for metadata records if (LOG_STREAMS_DOWNLOAD.isDebugEnabled()) { - // Log message, with exception so we can get a trace to see where the call came from + // Log message, with exception, so we can get a trace to see where the call came from LOG_STREAMS_DOWNLOAD.debug("Binary downloaded from Azure Blob Storage - identifier={} ", id, new Exception()); } } try { - return container.getBlockBlobReference(id).openInputStream(); - } catch (StorageException | URISyntaxException e) { + return container.getBlobClient(id).openInputStream(); + } catch (Exception e) { throw new DataStoreException(e); } } @@ -1362,4 +1219,54 @@ private String getContainerName() { .map(AzureBlobContainerProvider::getContainerName) .orElse(null); } + + @Override + public byte[] getOrCreateReferenceKey() throws DataStoreException { + try { + if (secret != null && secret.length != 0) { + return secret; + } else { + byte[] key; + // Try reading from the metadata folder if it exists + key = readMetadataBytes(REF_KEY); + if (key == null) { + key = super.getOrCreateReferenceKey(); + addMetadataRecord(new ByteArrayInputStream(key), REF_KEY); + key = readMetadataBytes(REF_KEY); + } + secret = key; + return secret; + } + } catch (IOException e) { + throw new DataStoreException("Unable to get or create key " + e); + } + } + + protected byte[] readMetadataBytes(String name) throws IOException, DataStoreException { + DataRecord rec = getMetadataRecord(name); + byte[] key = null; + if (rec != null) { + InputStream stream = null; + try { + stream = rec.getStream(); + return IOUtils.toByteArray(stream); + } finally { + IOUtils.closeQuietly(stream); + } + } + return key; + } + + private String computeSecondaryLocationEndpoint() { + String accountName = 
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java index 11575ad9667..035b494b534 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java @@ -25,6 +25,7 @@ import org.apache.jackrabbit.core.data.DataIdentifier; import org.apache.jackrabbit.core.data.DataRecord; import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobStoreBackendV8; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException; @@ -41,11 +42,18 @@ public class AzureDataStore extends AbstractSharedCachingDataStore implements Co protected Properties properties; - private AzureBlobStoreBackend azureBlobStoreBackend; + private AbstractAzureBlobStoreBackend azureBlobStoreBackend; + + private final boolean useAzureSdkV12 = Boolean.getBoolean("blob.azure.v12.enabled"); @Override protected AbstractSharedBackend createBackend() { - azureBlobStoreBackend = new AzureBlobStoreBackend(); + if (useAzureSdkV12) { + azureBlobStoreBackend = new AzureBlobStoreBackend(); + } else { + azureBlobStoreBackend = new AzureBlobStoreBackendV8(); + } + if (null != properties) { azureBlobStoreBackend.setProperties(properties); } diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java index 3ad2e5e46e8..36469401b9e 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java @@ -20,6 +20,7 @@ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; import org.apache.jackrabbit.oak.stats.StatisticsProvider; +import org.jetbrains.annotations.NotNull; import org.osgi.service.component.annotations.Component; import org.osgi.service.component.annotations.ConfigurationPolicy; import org.osgi.service.component.annotations.Reference; @@ -32,7 +33,7 @@ public class AzureDataStoreService extends AbstractAzureDataStoreService { public static final String NAME = "org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore"; - protected StatisticsProvider getStatisticsProvider(){ + protected @NotNull StatisticsProvider getStatisticsProvider() { return statisticsProvider; }
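Note: the SDK generation is chosen by a JVM system property, not OSGi configuration, and the field initializer runs when AzureDataStore is instantiated, so the flag must be set on the command line (-Dblob.azure.v12.enabled=true) or very early in startup. A small illustration of the semantics:

// equivalent to passing -Dblob.azure.v12.enabled=true to the JVM,
// as long as it happens before the data store is constructed:
System.setProperty("blob.azure.v12.enabled", "true");
boolean useV12 = Boolean.getBoolean("blob.azure.v12.enabled"); // true -> v12 backend, otherwise AzureBlobStoreBackendV8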
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Constants.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Constants.java new file mode 100644 index 00000000000..19613f82f97 --- /dev/null +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Constants.java @@ -0,0 +1,19 @@ +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; + +public interface Constants { + String META_DIR_NAME = "META"; + String META_KEY_PREFIX = META_DIR_NAME + "/"; + + String REF_KEY = "reference.key"; + String LAST_MODIFIED_KEY = "lastModified"; + + long BUFFERED_STREAM_THRESHOLD = 1024 * 1024; + long MIN_MULTIPART_UPLOAD_PART_SIZE = 1024 * 1024 * 10; // 10MB + long MAX_MULTIPART_UPLOAD_PART_SIZE = 1024 * 1024 * 100; // 100MB + long MAX_SINGLE_PUT_UPLOAD_SIZE = 1024 * 1024 * 256; // 256MB, Azure limit + long MAX_BINARY_UPLOAD_SIZE = (long) Math.floor(1024L * 1024L * 1024L * 1024L * 4.75); // 4.75TB, Azure limit + int MAX_ALLOWABLE_UPLOAD_URIS = 50000; // Azure limit + int MAX_UNIQUE_RECORD_TRIES = 10; + int DEFAULT_CONCURRENT_REQUEST_COUNT = 2; + int MAX_CONCURRENT_REQUEST_COUNT = 50; +} diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java index b8fda6981d4..65c4586f860 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java @@ -1,130 +1,86 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.Proxy; -import java.net.SocketAddress; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.Properties; - -import org.apache.jackrabbit.guava.common.base.Strings; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.RetryNoRetry; -import com.microsoft.azure.storage.RetryPolicy; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.azure.core.http.HttpClient; +import com.azure.core.http.ProxyOptions; +import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobContainerClientBuilder; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; +import com.google.common.base.Strings; import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.core.data.DataStoreException; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -public final class Utils { +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Properties; +public class Utils { + public static final String DASH = "-"; public static final String DEFAULT_CONFIG_FILE = "azure.properties"; - public static final String DASH = "-"; + public Utils() {} - /** - * private constructor so that class cannot initialized from outside. - */ - private Utils() { - } + public static BlobContainerClient getBlobContainer(@NotNull final String connectionString, + @NotNull final String containerName, + @Nullable final RequestRetryOptions retryOptions, + final Properties properties) throws DataStoreException { + try { + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .connectionString(connectionString) + .retryOptions(retryOptions); - /** - * Create CloudBlobClient from properties. 
- * - * @param connectionString connectionString to configure @link {@link CloudBlobClient} - * @return {@link CloudBlobClient} - */ - public static CloudBlobClient getBlobClient(@NotNull final String connectionString) throws URISyntaxException, InvalidKeyException { - return getBlobClient(connectionString, null); - } + HttpClient httpClient = new NettyAsyncHttpClientBuilder() + .proxy(computeProxyOptions(properties)) + .build(); - public static CloudBlobClient getBlobClient(@NotNull final String connectionString, - @Nullable final BlobRequestOptions requestOptions) throws URISyntaxException, InvalidKeyException { - CloudStorageAccount account = CloudStorageAccount.parse(connectionString); - CloudBlobClient client = account.createCloudBlobClient(); - if (null != requestOptions) { - client.setDefaultRequestOptions(requestOptions); - } - return client; - } + builder.httpClient(httpClient); - public static CloudBlobContainer getBlobContainer(@NotNull final String connectionString, - @NotNull final String containerName) throws DataStoreException { - return getBlobContainer(connectionString, containerName, null); - } + BlobServiceClient blobServiceClient = builder.buildClient(); + return blobServiceClient.getBlobContainerClient(containerName); - public static CloudBlobContainer getBlobContainer(@NotNull final String connectionString, - @NotNull final String containerName, - @Nullable final BlobRequestOptions requestOptions) throws DataStoreException { - try { - CloudBlobClient client = ( - (null == requestOptions) - ? Utils.getBlobClient(connectionString) - : Utils.getBlobClient(connectionString, requestOptions) - ); - return client.getContainerReference(containerName); - } catch (InvalidKeyException | URISyntaxException | StorageException e) { + } catch (Exception e) { throw new DataStoreException(e); } } - public static void setProxyIfNeeded(final Properties properties) { + public static ProxyOptions computeProxyOptions(final Properties properties) { String proxyHost = properties.getProperty(AzureConstants.PROXY_HOST); String proxyPort = properties.getProperty(AzureConstants.PROXY_PORT); - if (!Strings.isNullOrEmpty(proxyHost) && - !Strings.isNullOrEmpty(proxyPort)) { - int port = Integer.parseInt(proxyPort); - SocketAddress proxyAddr = new InetSocketAddress(proxyHost, port); - Proxy proxy = new Proxy(Proxy.Type.HTTP, proxyAddr); - OperationContext.setDefaultProxy(proxy); + if (!Strings.isNullOrEmpty(proxyHost) && !Strings.isNullOrEmpty(proxyPort)) { + return new ProxyOptions(ProxyOptions.Type.HTTP, + new InetSocketAddress(proxyHost, Integer.parseInt(proxyPort))); } + return null; } - public static RetryPolicy getRetryPolicy(final String maxRequestRetry) { - int retries = PropertiesUtil.toInteger(maxRequestRetry, -1); - if (retries < 0) { + public static RequestRetryOptions getRetryOptions(final String maxRequestRetryCount, Integer requestTimeout, String secondaryLocation) { + int retries = PropertiesUtil.toInteger(maxRequestRetryCount, -1); + if (retries < 0) { return null; } + if (retries == 0) { - return new RetryNoRetry(); + return new RequestRetryOptions(RetryPolicyType.FIXED, 1, + requestTimeout, null, null, + secondaryLocation); } - return new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, retries); + return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, retries, + requestTimeout, null, null, + secondaryLocation); } - public static String getConnectionStringFromProperties(Properties properties) { - String sasUri = properties.getProperty(AzureConstants.AZURE_SAS, "");
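// Credential precedence, mirroring AzureBlobContainerProvider: a full connection string wins, then the SAS token, then account name + key.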
String blobEndpoint = properties.getProperty(AzureConstants.AZURE_BLOB_ENDPOINT, ""); String connectionString = properties.getProperty(AzureConstants.AZURE_CONNECTION_STRING, ""); @@ -141,7 +97,7 @@ public static String getConnectionStringFromProperties(Properties properties) { return getConnectionString( accountName, - accountKey, + accountKey, blobEndpoint); } @@ -153,21 +109,24 @@ public static String getConnectionStringForSas(String sasUri, String blobEndpoin } } - public static String getConnectionString(final String accountName, final String accountKey) { - return getConnectionString(accountName, accountKey, null); - } - public static String getConnectionString(final String accountName, final String accountKey, String blobEndpoint) { StringBuilder connString = new StringBuilder("DefaultEndpointsProtocol=https"); connString.append(";AccountName=").append(accountName); connString.append(";AccountKey=").append(accountKey); - + if (!Strings.isNullOrEmpty(blobEndpoint)) { connString.append(";BlobEndpoint=").append(blobEndpoint); } return connString.toString(); } + public static BlobContainerClient getBlobContainerFromConnectionString(final String azureConnectionString, final String containerName) { + return new BlobContainerClientBuilder() + .connectionString(azureConnectionString) + .containerName(containerName) + .buildClient(); + } + /** * Read a configuration properties file. If the file name ends with ";burn", * the file is deleted after reading. diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java new file mode 100644 index 00000000000..9eddee3aa06 --- /dev/null +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java @@ -0,0 +1,347 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8; + +import com.azure.core.credential.AccessToken; +import com.azure.core.credential.TokenRequestContext; +import com.azure.identity.ClientSecretCredential; +import com.azure.identity.ClientSecretCredentialBuilder; +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.StorageCredentialsToken; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.UserDelegationKey; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.microsoft.azure.storage.blob.CloudBlobClient; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders; +import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; +import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; +import org.apache.commons.lang3.StringUtils; +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils; +import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Date; +import java.util.EnumSet; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +public class AzureBlobContainerProviderV8 implements Closeable { + private static final Logger log = LoggerFactory.getLogger(AzureBlobContainerProviderV8.class); + private static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; + private static final String AZURE_DEFAULT_SCOPE = "https://storage.azure.com/.default"; + private final String azureConnectionString; + private final String accountName; + private final String containerName; + private final String blobEndpoint; + private final String sasToken; + private final String accountKey; + private final String tenantId; + private final String clientId; + private final String clientSecret; + private ClientSecretCredential clientSecretCredential; + private AccessToken accessToken; + private StorageCredentialsToken storageCredentialsToken; + private static final long TOKEN_REFRESHER_INITIAL_DELAY = 45L; + private static final long TOKEN_REFRESHER_DELAY = 1L; + private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(); + + private AzureBlobContainerProviderV8(Builder builder) { + this.azureConnectionString = builder.azureConnectionString; + this.accountName = builder.accountName; + this.containerName = builder.containerName; + this.blobEndpoint = builder.blobEndpoint; + this.sasToken = builder.sasToken; + this.accountKey = builder.accountKey; + this.tenantId = builder.tenantId; + this.clientId = builder.clientId; + this.clientSecret = builder.clientSecret; + } + + public static class Builder { + private final String containerName; + + private Builder(String containerName) { + 
this.containerName = containerName; + } + + public static Builder builder(String containerName) { + return new Builder(containerName); + } + + private String azureConnectionString; + private String accountName; + private String blobEndpoint; + private String sasToken; + private String accountKey; + private String tenantId; + private String clientId; + private String clientSecret; + + public Builder withAzureConnectionString(String azureConnectionString) { + this.azureConnectionString = azureConnectionString; + return this; + } + + public Builder withAccountName(String accountName) { + this.accountName = accountName; + return this; + } + + public Builder withBlobEndpoint(String blobEndpoint) { + this.blobEndpoint = blobEndpoint; + return this; + } + + public Builder withSasToken(String sasToken) { + this.sasToken = sasToken; + return this; + } + + public Builder withAccountKey(String accountKey) { + this.accountKey = accountKey; + return this; + } + + public Builder withTenantId(String tenantId) { + this.tenantId = tenantId; + return this; + } + + public Builder withClientId(String clientId) { + this.clientId = clientId; + return this; + } + + public Builder withClientSecret(String clientSecret) { + this.clientSecret = clientSecret; + return this; + } + + public Builder initializeWithProperties(Properties properties) { + withAzureConnectionString(properties.getProperty(AzureConstants.AZURE_CONNECTION_STRING, "")); + withAccountName(properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "")); + withBlobEndpoint(properties.getProperty(AzureConstants.AZURE_BLOB_ENDPOINT, "")); + withSasToken(properties.getProperty(AzureConstants.AZURE_SAS, "")); + withAccountKey(properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, "")); + withTenantId(properties.getProperty(AzureConstants.AZURE_TENANT_ID, "")); + withClientId(properties.getProperty(AzureConstants.AZURE_CLIENT_ID, "")); + withClientSecret(properties.getProperty(AzureConstants.AZURE_CLIENT_SECRET, "")); + return this; + } + + public AzureBlobContainerProviderV8 build() { + return new AzureBlobContainerProviderV8(this); + } + } + + public String getContainerName() { + return containerName; + } + + @NotNull + public CloudBlobContainer getBlobContainer() throws DataStoreException { + return this.getBlobContainer(null); + } + + @NotNull + public CloudBlobContainer getBlobContainer(@Nullable BlobRequestOptions blobRequestOptions) throws DataStoreException { + // connection string will be given preference over service principals / sas / account key + if (StringUtils.isNotBlank(azureConnectionString)) { + log.debug("connecting to azure blob storage via azureConnectionString"); + return UtilsV8.getBlobContainer(azureConnectionString, containerName, blobRequestOptions); + } else if (authenticateViaServicePrincipal()) { + log.debug("connecting to azure blob storage via service principal credentials"); + return getBlobContainerFromServicePrincipals(blobRequestOptions); + } else if (StringUtils.isNotBlank(sasToken)) { + log.debug("connecting to azure blob storage via sas token"); + final String connectionStringWithSasToken = Utils.getConnectionStringForSas(sasToken, blobEndpoint, accountName); + return UtilsV8.getBlobContainer(connectionStringWithSasToken, containerName, blobRequestOptions); + } + log.debug("connecting to azure blob storage via access key"); + final String connectionStringWithAccountKey = Utils.getConnectionString(accountName, accountKey, blobEndpoint); + return UtilsV8.getBlobContainer(connectionStringWithAccountKey, 
containerName, blobRequestOptions); + } + + @NotNull + private CloudBlobContainer getBlobContainerFromServicePrincipals(@Nullable BlobRequestOptions blobRequestOptions) throws DataStoreException { + StorageCredentialsToken storageCredentialsToken = getStorageCredentials(); + try { + CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, accountName); + CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient(); + if (blobRequestOptions != null) { + cloudBlobClient.setDefaultRequestOptions(blobRequestOptions); + } + return cloudBlobClient.getContainerReference(containerName); + } catch (URISyntaxException | StorageException e) { + throw new DataStoreException(e); + } + } + + @NotNull + private StorageCredentialsToken getStorageCredentials() { + boolean isAccessTokenGenerated = false; + /* generate access token, the same token will be used for subsequent access + * generated token is valid for 1 hour only and will be refreshed in background + * */ + if (accessToken == null) { + clientSecretCredential = new ClientSecretCredentialBuilder() + .clientId(clientId) + .clientSecret(clientSecret) + .tenantId(tenantId) + .build(); + accessToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); + if (accessToken == null || StringUtils.isBlank(accessToken.getToken())) { + log.error("Access token is null or empty"); + throw new IllegalArgumentException("Could not connect to azure storage, access token is null or empty"); + } + storageCredentialsToken = new StorageCredentialsToken(accountName, accessToken.getToken()); + isAccessTokenGenerated = true; + } + + Objects.requireNonNull(storageCredentialsToken, "storage credentials token cannot be null"); + + // start refresh token executor only when the access token is first generated + if (isAccessTokenGenerated) { + log.info("starting refresh token task at: {}", OffsetDateTime.now()); + TokenRefresher tokenRefresher = new TokenRefresher(); + executorService.scheduleWithFixedDelay(tokenRefresher, TOKEN_REFRESHER_INITIAL_DELAY, TOKEN_REFRESHER_DELAY, TimeUnit.MINUTES); + } + return storageCredentialsToken; + } + + @NotNull + public String generateSharedAccessSignature(BlobRequestOptions requestOptions, + String key, + EnumSet<SharedAccessBlobPermissions> permissions, + int expirySeconds, + SharedAccessBlobHeaders optionalHeaders) throws DataStoreException, URISyntaxException, StorageException, InvalidKeyException { + SharedAccessBlobPolicy policy = new SharedAccessBlobPolicy(); + Date expiry = Date.from(Instant.now().plusSeconds(expirySeconds)); + policy.setSharedAccessExpiryTime(expiry); + policy.setPermissions(permissions); + + CloudBlockBlob blob = getBlobContainer(requestOptions).getBlockBlobReference(key); + + if (authenticateViaServicePrincipal()) { + return generateUserDelegationKeySignedSas(blob, policy, optionalHeaders, expiry); + } + return generateSas(blob, policy, optionalHeaders); + } + + @NotNull + private String generateUserDelegationKeySignedSas(CloudBlockBlob blob, + SharedAccessBlobPolicy policy, + SharedAccessBlobHeaders optionalHeaders, + Date expiry) throws StorageException { + fillEmptyHeaders(optionalHeaders); + UserDelegationKey userDelegationKey = blob.getServiceClient().getUserDelegationKey(Date.from(Instant.now().minusSeconds(900)), + expiry); + return optionalHeaders == null ?
blob.generateUserDelegationSharedAccessSignature(userDelegationKey, policy) : + blob.generateUserDelegationSharedAccessSignature(userDelegationKey, policy, optionalHeaders, null, null); + } + + /* set empty headers as blank string due to a bug in Azure SDK + * Azure SDK considers null headers as 'null' string which corrupts the string to sign and generates an invalid + * sas token + * */ + private void fillEmptyHeaders(SharedAccessBlobHeaders sharedAccessBlobHeaders) { + final String EMPTY_STRING = ""; + Optional.ofNullable(sharedAccessBlobHeaders) + .ifPresent(headers -> { + if (StringUtils.isBlank(headers.getCacheControl())) { + headers.setCacheControl(EMPTY_STRING); + } + if (StringUtils.isBlank(headers.getContentDisposition())) { + headers.setContentDisposition(EMPTY_STRING); + } + if (StringUtils.isBlank(headers.getContentEncoding())) { + headers.setContentEncoding(EMPTY_STRING); + } + if (StringUtils.isBlank(headers.getContentLanguage())) { + headers.setContentLanguage(EMPTY_STRING); + } + if (StringUtils.isBlank(headers.getContentType())) { + headers.setContentType(EMPTY_STRING); + } + }); + } + + @NotNull + private String generateSas(CloudBlockBlob blob, + SharedAccessBlobPolicy policy, + SharedAccessBlobHeaders optionalHeaders) throws InvalidKeyException, StorageException { + return optionalHeaders == null ? blob.generateSharedAccessSignature(policy, null) : + blob.generateSharedAccessSignature(policy, + optionalHeaders, null, null, null, true); + } + + private boolean authenticateViaServicePrincipal() { + return StringUtils.isBlank(azureConnectionString) && + StringUtils.isNoneBlank(accountName, tenantId, clientId, clientSecret); + } + + private class TokenRefresher implements Runnable { + @Override + public void run() { + try { + log.debug("Checking for azure access token expiry at: {}", LocalDateTime.now()); + OffsetDateTime tokenExpiryThreshold = OffsetDateTime.now().plusMinutes(5); + if (accessToken.getExpiresAt() != null && accessToken.getExpiresAt().isBefore(tokenExpiryThreshold)) { + log.info("Access token is about to expire (5 minutes or less) at: {}. New access token will be generated", + accessToken.getExpiresAt().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME)); + AccessToken newToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); + log.info("New azure access token generated at: {}", LocalDateTime.now()); + if (newToken == null || StringUtils.isBlank(newToken.getToken())) { + log.error("New access token is null or empty"); + return; + } + // update access token with newly generated token + accessToken = newToken; + storageCredentialsToken.updateToken(accessToken.getToken()); + } + } catch (Exception e) { + log.error("Error while acquiring new access token: ", e); + } + } + } + + @Override + public void close() { + new ExecutorCloser(executorService).close(); + log.info("Refresh token executor service shutdown completed"); + } +} diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java new file mode 100644 index 00000000000..19d3f22eb7d --- /dev/null +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java @@ -0,0 +1,1362 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8; + +import static java.lang.Thread.currentThread; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.BUFFERED_STREAM_THRESHOLD; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.DEFAULT_CONCURRENT_REQUEST_COUNT; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.LAST_MODIFIED_KEY; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_ALLOWABLE_UPLOAD_URIS; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_BINARY_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_CONCURRENT_REQUEST_COUNT; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_MULTIPART_UPLOAD_PART_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_SINGLE_PUT_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.META_DIR_NAME; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.META_KEY_PREFIX; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MIN_MULTIPART_UPLOAD_PART_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.REF_KEY; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.time.Instant; +import java.util.Collection; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.Queue; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import com.google.common.base.Strings; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.microsoft.azure.storage.AccessCondition; +import com.microsoft.azure.storage.Constants; +import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.ResultContinuation; +import com.microsoft.azure.storage.ResultSegment; +import com.microsoft.azure.storage.RetryPolicy; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobListingDetails; +import 
com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.microsoft.azure.storage.blob.BlockEntry; +import com.microsoft.azure.storage.blob.BlockListingFilter; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.CopyStatus; +import com.microsoft.azure.storage.blob.ListBlobItem; +import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders; +import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AbstractAzureBlobStoreBackend; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils; +import org.apache.jackrabbit.oak.commons.PropertiesUtil; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadToken; +import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord; +import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend; +import org.apache.jackrabbit.util.Base64; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AzureBlobStoreBackendV8 extends AbstractAzureBlobStoreBackend { + + private static final Logger LOG = LoggerFactory.getLogger(AzureBlobStoreBackendV8.class); + private static final Logger LOG_STREAMS_DOWNLOAD = LoggerFactory.getLogger("oak.datastore.download.streams"); + private static final Logger LOG_STREAMS_UPLOAD = LoggerFactory.getLogger("oak.datastore.upload.streams"); + + private Properties properties; + private AzureBlobContainerProviderV8 azureBlobContainerProvider; + private int concurrentRequestCount = DEFAULT_CONCURRENT_REQUEST_COUNT; + private RetryPolicy retryPolicy; + private Integer requestTimeout; + private int httpDownloadURIExpirySeconds = 0; // disabled by default + private int httpUploadURIExpirySeconds = 0; // disabled by default + private String uploadDomainOverride = null; + private String downloadDomainOverride = null; + private boolean createBlobContainer = true; + private boolean presignedDownloadURIVerifyExists = true; + private boolean enableSecondaryLocation = AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_DEFAULT; + + private Cache<DataIdentifier, URI> httpDownloadURICache; + + private byte[] secret; + + public void setProperties(final Properties properties) { + this.properties = properties; + } + + private volatile CloudBlobContainer azureContainer = null; + + public CloudBlobContainer getAzureContainer() throws DataStoreException { + if (azureContainer == null) { + synchronized (this) { + if (azureContainer == null) { + azureContainer = azureBlobContainerProvider.getBlobContainer(getBlobRequestOptions()); + } + } + } + return azureContainer; + } + + @NotNull + protected BlobRequestOptions
getBlobRequestOptions() { + BlobRequestOptions requestOptions = new BlobRequestOptions(); + if (null != retryPolicy) { + requestOptions.setRetryPolicyFactory(retryPolicy); + } + if (null != requestTimeout) { + requestOptions.setTimeoutIntervalInMs(requestTimeout); + } + requestOptions.setConcurrentRequestCount(concurrentRequestCount); + if (enableSecondaryLocation) { + requestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); + } + return requestOptions; + } + + @Override + public void init() throws DataStoreException { + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + long start = System.currentTimeMillis(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + LOG.debug("Started backend initialization"); + + if (null == properties) { + try { + properties = Utils.readConfig(UtilsV8.DEFAULT_CONFIG_FILE); + } + catch (IOException e) { + throw new DataStoreException("Unable to initialize Azure Data Store from " + UtilsV8.DEFAULT_CONFIG_FILE, e); + } + } + + try { + UtilsV8.setProxyIfNeeded(properties); + createBlobContainer = PropertiesUtil.toBoolean( + Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_CREATE_CONTAINER)), true); + initAzureDSConfig(); + + concurrentRequestCount = PropertiesUtil.toInteger( + properties.getProperty(AzureConstants.AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION), + DEFAULT_CONCURRENT_REQUEST_COUNT); + if (concurrentRequestCount < DEFAULT_CONCURRENT_REQUEST_COUNT) { + LOG.warn("Invalid setting [{}] for concurrentRequestsPerOperation (too low); resetting to {}", + concurrentRequestCount, + DEFAULT_CONCURRENT_REQUEST_COUNT); + concurrentRequestCount = DEFAULT_CONCURRENT_REQUEST_COUNT; + } else if (concurrentRequestCount > MAX_CONCURRENT_REQUEST_COUNT) { + LOG.warn("Invalid setting [{}] for concurrentRequestsPerOperation (too high); resetting to {}", + concurrentRequestCount, + MAX_CONCURRENT_REQUEST_COUNT); + concurrentRequestCount = MAX_CONCURRENT_REQUEST_COUNT; + } + LOG.info("Using concurrentRequestsPerOperation={}", concurrentRequestCount); + + retryPolicy = UtilsV8.getRetryPolicy(properties.getProperty(AzureConstants.AZURE_BLOB_MAX_REQUEST_RETRY)); + if (properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT) != null) { + requestTimeout = PropertiesUtil.toInteger(properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT), RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT); + } + presignedDownloadURIVerifyExists = PropertiesUtil.toBoolean( + Strings.emptyToNull(properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_VERIFY_EXISTS)), true); + + enableSecondaryLocation = PropertiesUtil.toBoolean( + properties.getProperty(AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_NAME), + AzureConstants.AZURE_BLOB_ENABLE_SECONDARY_LOCATION_DEFAULT + ); + + CloudBlobContainer azureContainer = getAzureContainer(); + + if (createBlobContainer && !azureContainer.exists()) { + azureContainer.create(); + LOG.info("New container created. containerName={}", getContainerName()); + } else { + LOG.info("Reusing existing container. containerName={}", getContainerName()); + } + LOG.debug("Backend initialized. 
duration={}", (System.currentTimeMillis() - start)); + + // settings pertaining to DataRecordAccessProvider functionality + String putExpiry = properties.getProperty(AzureConstants.PRESIGNED_HTTP_UPLOAD_URI_EXPIRY_SECONDS); + if (null != putExpiry) { + this.setHttpUploadURIExpirySeconds(Integer.parseInt(putExpiry)); + } + String getExpiry = properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_EXPIRY_SECONDS); + if (null != getExpiry) { + this.setHttpDownloadURIExpirySeconds(Integer.parseInt(getExpiry)); + String cacheMaxSize = properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_CACHE_MAX_SIZE); + if (null != cacheMaxSize) { + this.setHttpDownloadURICacheSize(Integer.parseInt(cacheMaxSize)); + } + else { + this.setHttpDownloadURICacheSize(0); // default + } + } + uploadDomainOverride = properties.getProperty(AzureConstants.PRESIGNED_HTTP_UPLOAD_URI_DOMAIN_OVERRIDE, null); + downloadDomainOverride = properties.getProperty(AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_DOMAIN_OVERRIDE, null); + + // Initialize reference key secret + boolean createRefSecretOnInit = PropertiesUtil.toBoolean( + Strings.emptyToNull(properties.getProperty(AzureConstants.AZURE_REF_ON_INIT)), true); + + if (createRefSecretOnInit) { + getOrCreateReferenceKey(); + } + } + catch (StorageException e) { + throw new DataStoreException(e); + } + } + finally { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + + private void initAzureDSConfig() { + AzureBlobContainerProviderV8.Builder builder = AzureBlobContainerProviderV8.Builder.builder(properties.getProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME)) + .withAzureConnectionString(properties.getProperty(AzureConstants.AZURE_CONNECTION_STRING, "")) + .withAccountName(properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "")) + .withBlobEndpoint(properties.getProperty(AzureConstants.AZURE_BLOB_ENDPOINT, "")) + .withSasToken(properties.getProperty(AzureConstants.AZURE_SAS, "")) + .withAccountKey(properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, "")) + .withTenantId(properties.getProperty(AzureConstants.AZURE_TENANT_ID, "")) + .withClientId(properties.getProperty(AzureConstants.AZURE_CLIENT_ID, "")) + .withClientSecret(properties.getProperty(AzureConstants.AZURE_CLIENT_SECRET, "")); + azureBlobContainerProvider = builder.build(); + } + + @Override + public InputStream read(DataIdentifier identifier) throws DataStoreException { + if (null == identifier) throw new NullPointerException("identifier"); + + String key = getKeyName(identifier); + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader( + getClass().getClassLoader()); + CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key); + if (!blob.exists()) { + throw new DataStoreException(String.format("Trying to read missing blob. identifier=%s", key)); + } + + InputStream is = blob.openInputStream(); + LOG.debug("Got input stream for blob. identifier={} duration={}", key, (System.currentTimeMillis() - start)); + if (LOG_STREAMS_DOWNLOAD.isDebugEnabled()) { + // Log message, with exception so we can get a trace to see where the call came from + LOG_STREAMS_DOWNLOAD.debug("Binary downloaded from Azure Blob Storage - identifier={}", key, new Exception()); + } + return is; + } + catch (StorageException e) { + LOG.info("Error reading blob. 
identifier={}", key); + throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e); + } + catch (URISyntaxException e) { + LOG.debug("Error reading blob. identifier={}", key); + throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e); + } finally { + if (contextClassLoader != null) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public void write(DataIdentifier identifier, File file) throws DataStoreException { + if (null == identifier) { + throw new NullPointerException("identifier"); + } + if (null == file) { + throw new NullPointerException("file"); + } + String key = getKeyName(identifier); + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + long len = file.length(); + LOG.debug("Blob write started. identifier={} length={}", key, len); + CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key); + if (!blob.exists()) { + addLastModified(blob); + + BlobRequestOptions options = new BlobRequestOptions(); + options.setConcurrentRequestCount(concurrentRequestCount); + boolean useBufferedStream = len < BUFFERED_STREAM_THRESHOLD; + final InputStream in = useBufferedStream ? new BufferedInputStream(new FileInputStream(file)) : new FileInputStream(file); + try { + blob.upload(in, len, null, options, null); + LOG.debug("Blob created. identifier={} length={} duration={} buffered={}", key, len, (System.currentTimeMillis() - start), useBufferedStream); + if (LOG_STREAMS_UPLOAD.isDebugEnabled()) { + // Log message, with exception so we can get a trace to see where the call came from + LOG_STREAMS_UPLOAD.debug("Binary uploaded to Azure Blob Storage - identifier={}", key, new Exception()); + } + } finally { + in.close(); + } + return; + } + + blob.downloadAttributes(); + if (blob.getProperties().getLength() != len) { + throw new DataStoreException("Length Collision. identifier=" + key + + " new length=" + len + + " old length=" + blob.getProperties().getLength()); + } + + LOG.trace("Blob already exists. identifier={} lastModified={}", key, getLastModified(blob)); + addLastModified(blob); + blob.uploadMetadata(); + + LOG.debug("Blob updated. identifier={} lastModified={} duration={}", key, + getLastModified(blob), (System.currentTimeMillis() - start)); + } + catch (StorageException e) { + LOG.info("Error writing blob. identifier={}", key, e); + throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e); + } + catch (URISyntaxException | IOException e) { + LOG.debug("Error writing blob. identifier={}", key, e); + throw new DataStoreException(String.format("Cannot write blob. 
identifier=%s", key), e); + } finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + private static boolean waitForCopy(CloudBlob blob) throws StorageException, InterruptedException { + boolean continueLoop = true; + CopyStatus status = CopyStatus.PENDING; + while (continueLoop) { + blob.downloadAttributes(); + status = blob.getCopyState().getStatus(); + continueLoop = status == CopyStatus.PENDING; + // Sleep if retry is needed + if (continueLoop) { + Thread.sleep(500); + } + } + return status == CopyStatus.SUCCESS; + } + + @Override + public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException { + if (null == identifier) { + throw new NullPointerException("identifier"); + } + String key = getKeyName(identifier); + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key); + blob.downloadAttributes(); + AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord( + this, + azureBlobContainerProvider, + new DataIdentifier(getIdentifierName(blob.getName())), + getLastModified(blob), + blob.getProperties().getLength()); + LOG.debug("Data record read for blob. identifier={} duration={} record={}", + key, (System.currentTimeMillis() - start), record); + return record; + } + catch (StorageException e) { + if (404 == e.getHttpStatusCode()) { + LOG.debug("Unable to get record for blob; blob does not exist. identifier={}", key); + } + else { + LOG.info("Error getting data record for blob. identifier={}", key, e); + } + throw new DataStoreException(String.format("Cannot retrieve blob. identifier=%s", key), e); + } + catch (URISyntaxException e) { + LOG.debug("Error getting data record for blob. identifier={}", key, e); + throw new DataStoreException(String.format("Cannot retrieve blob. 
identifier=%s", key), e); + } finally { + if (contextClassLoader != null) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public Iterator getAllIdentifiers() { + return new RecordsIterator<>( + input -> new DataIdentifier(getIdentifierName(input.getName()))); + } + + @Override + public Iterator getAllRecords() { + final AbstractSharedBackend backend = this; + return new RecordsIterator<>( + input -> new AzureBlobStoreDataRecord( + backend, + azureBlobContainerProvider, + new DataIdentifier(getIdentifierName(input.getName())), + input.getLastModified(), + input.getLength()) + ); + } + + @Override + public boolean exists(DataIdentifier identifier) throws DataStoreException { + long start = System.currentTimeMillis(); + String key = getKeyName(identifier); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + boolean exists =getAzureContainer().getBlockBlobReference(key).exists(); + LOG.debug("Blob exists={} identifier={} duration={}", exists, key, (System.currentTimeMillis() - start)); + return exists; + } + catch (Exception e) { + throw new DataStoreException(e); + } + finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public void close() { + azureBlobContainerProvider.close(); + LOG.info("AzureBlobBackend closed."); + } + + @Override + public void deleteRecord(DataIdentifier identifier) throws DataStoreException { + if (null == identifier) throw new NullPointerException("identifier"); + + String key = getKeyName(identifier); + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + boolean result = getAzureContainer().getBlockBlobReference(key).deleteIfExists(); + LOG.debug("Blob {}. identifier={} duration={}", + result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)", + key, (System.currentTimeMillis() - start)); + } + catch (StorageException e) { + LOG.info("Error deleting blob. identifier={}", key, e); + throw new DataStoreException(e); + } + catch (URISyntaxException e) { + throw new DataStoreException(e); + } finally { + if (contextClassLoader != null) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public void addMetadataRecord(InputStream input, String name) throws DataStoreException { + if (null == input) { + throw new NullPointerException("input"); + } + if (Strings.isNullOrEmpty(name)) { + throw new IllegalArgumentException("name"); + } + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + addMetadataRecordImpl(input, name, -1L); + LOG.debug("Metadata record added. 
metadataName={} duration={}", name, (System.currentTimeMillis() - start)); + } + finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public void addMetadataRecord(File input, String name) throws DataStoreException { + if (null == input) { + throw new NullPointerException("input"); + } + if (Strings.isNullOrEmpty(name)) { + throw new IllegalArgumentException("name"); + } + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + addMetadataRecordImpl(new FileInputStream(input), name, input.length()); + LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start)); + } + catch (FileNotFoundException e) { + throw new DataStoreException(e); + } + finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + private void addMetadataRecordImpl(final InputStream input, String name, long recordLength) throws DataStoreException { + try { + CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); + CloudBlockBlob blob = metaDir.getBlockBlobReference(name); + addLastModified(blob); + blob.upload(input, recordLength); + } + catch (StorageException e) { + LOG.info("Error adding metadata record. metadataName={} length={}", name, recordLength, e); + throw new DataStoreException(e); + } + catch (URISyntaxException | IOException e) { + throw new DataStoreException(e); + } + } + + @Override + public DataRecord getMetadataRecord(String name) { + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + long start = System.currentTimeMillis(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); + CloudBlockBlob blob = metaDir.getBlockBlobReference(name); + if (!blob.exists()) { + LOG.warn("Trying to read missing metadata. metadataName={}", name); + return null; + } + blob.downloadAttributes(); + long lastModified = getLastModified(blob); + long length = blob.getProperties().getLength(); + AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord(this, + azureBlobContainerProvider, + new DataIdentifier(name), + lastModified, + length, + true); + LOG.debug("Metadata record read. metadataName={} duration={} record={}", name, (System.currentTimeMillis() - start), record); + return record; + + } catch (StorageException e) { + LOG.info("Error reading metadata record. metadataName={}", name, e); + throw new RuntimeException(e); + } catch (Exception e) { + LOG.debug("Error reading metadata record. 
metadataName={}", name, e); + throw new RuntimeException(e); + } finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public List getAllMetadataRecords(String prefix) { + if (null == prefix) { + throw new NullPointerException("prefix"); + } + long start = System.currentTimeMillis(); + final List records = Lists.newArrayList(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); + for (ListBlobItem item : metaDir.listBlobs(prefix)) { + if (item instanceof CloudBlob) { + CloudBlob blob = (CloudBlob) item; + blob.downloadAttributes(); + records.add(new AzureBlobStoreDataRecord( + this, + azureBlobContainerProvider, + new DataIdentifier(stripMetaKeyPrefix(blob.getName())), + getLastModified(blob), + blob.getProperties().getLength(), + true)); + } + } + LOG.debug("Metadata records read. recordsRead={} metadataFolder={} duration={}", records.size(), prefix, (System.currentTimeMillis() - start)); + } + catch (StorageException e) { + LOG.info("Error reading all metadata records. metadataFolder={}", prefix, e); + } + catch (DataStoreException | URISyntaxException e) { + LOG.debug("Error reading all metadata records. metadataFolder={}", prefix, e); + } + finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + return records; + } + + @Override + public boolean deleteMetadataRecord(String name) { + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name)); + boolean result = blob.deleteIfExists(); + LOG.debug("Metadata record {}. metadataName={} duration={}", + result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)", + name, (System.currentTimeMillis() - start)); + return result; + + } + catch (StorageException e) { + LOG.info("Error deleting metadata record. metadataName={}", name, e); + } + catch (DataStoreException | URISyntaxException e) { + LOG.debug("Error deleting metadata record. metadataName={}", name, e); + } + finally { + if (contextClassLoader != null) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + return false; + } + + @Override + public void deleteAllMetadataRecords(String prefix) { + if (null == prefix) { + throw new NullPointerException("prefix"); + } + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME); + int total = 0; + for (ListBlobItem item : metaDir.listBlobs(prefix)) { + if (item instanceof CloudBlob) { + if (((CloudBlob)item).deleteIfExists()) { + total++; + } + } + } + LOG.debug("Metadata records deleted. recordsDeleted={} metadataFolder={} duration={}", + total, prefix, (System.currentTimeMillis() - start)); + + } + catch (StorageException e) { + LOG.info("Error deleting all metadata records. 
metadataFolder={}", prefix, e); + } + catch (DataStoreException | URISyntaxException e) { + LOG.debug("Error deleting all metadata records. metadataFolder={}", prefix, e); + } + finally { + if (null != contextClassLoader) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + } + + @Override + public boolean metadataRecordExists(String name) { + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); + CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name)); + boolean exists = blob.exists(); + LOG.debug("Metadata record {} exists {}. duration={}", name, exists, (System.currentTimeMillis() - start)); + return exists; + } + catch (DataStoreException | StorageException | URISyntaxException e) { + LOG.debug("Error checking existence of metadata record = {}", name, e); + } + finally { + if (contextClassLoader != null) { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + } + return false; + } + + /** + * Get key from data identifier. Object is stored with key in ADS. + */ + private static String getKeyName(DataIdentifier identifier) { + String key = identifier.toString(); + return key.substring(0, 4) + UtilsV8.DASH + key.substring(4); + } + + /** + * Get data identifier from key. + */ + private static String getIdentifierName(String key) { + if (!key.contains(UtilsV8.DASH)) { + return null; + } else if (key.contains(META_KEY_PREFIX)) { + return key; + } + return key.substring(0, 4) + key.substring(5); + } + + private static String addMetaKeyPrefix(final String key) { + return META_KEY_PREFIX + key; + } + + private static String stripMetaKeyPrefix(String name) { + if (name.startsWith(META_KEY_PREFIX)) { + return name.substring(META_KEY_PREFIX.length()); + } + return name; + } + + private static void addLastModified(CloudBlockBlob blob) { + blob.getMetadata().put(LAST_MODIFIED_KEY, String.valueOf(System.currentTimeMillis())); + } + + private static long getLastModified(CloudBlob blob) { + if (blob.getMetadata().containsKey(LAST_MODIFIED_KEY)) { + return Long.parseLong(blob.getMetadata().get(LAST_MODIFIED_KEY)); + } + return blob.getProperties().getLastModified().getTime(); + } + + protected void setHttpDownloadURIExpirySeconds(int seconds) { + httpDownloadURIExpirySeconds = seconds; + } + + protected void setHttpDownloadURICacheSize(int maxSize) { + // max size 0 or smaller is used to turn off the cache + if (maxSize > 0) { + LOG.info("presigned GET URI cache enabled, maxSize = {} items, expiry = {} seconds", maxSize, httpDownloadURIExpirySeconds / 2); + httpDownloadURICache = CacheBuilder.newBuilder() + .maximumSize(maxSize) + .expireAfterWrite(httpDownloadURIExpirySeconds / 2, TimeUnit.SECONDS) + .build(); + } else { + LOG.info("presigned GET URI cache disabled"); + httpDownloadURICache = null; + } + } + + protected URI createHttpDownloadURI(@NotNull DataIdentifier identifier, + @NotNull DataRecordDownloadOptions downloadOptions) { + URI uri = null; + + // When running unit test from Maven, it doesn't always honor the @NotNull decorators + if (null == identifier) throw new NullPointerException("identifier"); + if (null == downloadOptions) throw new NullPointerException("downloadOptions"); + + if (httpDownloadURIExpirySeconds > 0) { + + String domain = getDirectDownloadBlobStorageDomain(downloadOptions.isDomainOverrideIgnored()); + if (null == domain) { + throw new 
NullPointerException("Could not determine domain for direct download"); + } + + String cacheKey = identifier + + domain + + Objects.toString(downloadOptions.getContentTypeHeader(), "") + + Objects.toString(downloadOptions.getContentDispositionHeader(), ""); + if (null != httpDownloadURICache) { + uri = httpDownloadURICache.getIfPresent(cacheKey); + } + if (null == uri) { + if (presignedDownloadURIVerifyExists) { + // Check if this identifier exists. If not, we want to return null + // even if the identifier is in the download URI cache. + try { + if (!exists(identifier)) { + LOG.warn("Cannot create download URI for nonexistent blob {}; returning null", getKeyName(identifier)); + return null; + } + } catch (DataStoreException e) { + LOG.warn("Cannot create download URI for blob {} (caught DataStoreException); returning null", getKeyName(identifier), e); + return null; + } + } + + String key = getKeyName(identifier); + SharedAccessBlobHeaders headers = new SharedAccessBlobHeaders(); + headers.setCacheControl(String.format("private, max-age=%d, immutable", httpDownloadURIExpirySeconds)); + + String contentType = downloadOptions.getContentTypeHeader(); + if (!Strings.isNullOrEmpty(contentType)) { + headers.setContentType(contentType); + } + + String contentDisposition = + downloadOptions.getContentDispositionHeader(); + if (!Strings.isNullOrEmpty(contentDisposition)) { + headers.setContentDisposition(contentDisposition); + } + + uri = createPresignedURI(key, + EnumSet.of(SharedAccessBlobPermissions.READ), + httpDownloadURIExpirySeconds, + headers, + domain); + if (uri != null && httpDownloadURICache != null) { + httpDownloadURICache.put(cacheKey, uri); + } + } + } + return uri; + } + + protected void setHttpUploadURIExpirySeconds(int seconds) { httpUploadURIExpirySeconds = seconds; } + + private DataIdentifier generateSafeRandomIdentifier() { + return new DataIdentifier( + String.format("%s-%d", + UUID.randomUUID().toString(), + Instant.now().toEpochMilli() + ) + ); + } + + protected DataRecordUpload initiateHttpUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final DataRecordUploadOptions options) { + List uploadPartURIs = Lists.newArrayList(); + long minPartSize = MIN_MULTIPART_UPLOAD_PART_SIZE; + long maxPartSize = MAX_MULTIPART_UPLOAD_PART_SIZE; + + if (0L >= maxUploadSizeInBytes) { + throw new IllegalArgumentException("maxUploadSizeInBytes must be > 0"); + } + else if (0 == maxNumberOfURIs) { + throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1"); + } + else if (-1 > maxNumberOfURIs) { + throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1"); + } + else if (maxUploadSizeInBytes > MAX_SINGLE_PUT_UPLOAD_SIZE && + maxNumberOfURIs == 1) { + throw new IllegalArgumentException( + String.format("Cannot do single-put upload with file size %d - exceeds max single-put upload size of %d", + maxUploadSizeInBytes, + MAX_SINGLE_PUT_UPLOAD_SIZE) + ); + } + else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) { + throw new IllegalArgumentException( + String.format("Cannot do upload with file size %d - exceeds max upload size of %d", + maxUploadSizeInBytes, + MAX_BINARY_UPLOAD_SIZE) + ); + } + + DataIdentifier newIdentifier = generateSafeRandomIdentifier(); + String blobId = getKeyName(newIdentifier); + String uploadId = null; + + if (httpUploadURIExpirySeconds > 0) { + // Always do multi-part uploads for Azure, even for small binaries. 
+    protected void setHttpUploadURIExpirySeconds(int seconds) { httpUploadURIExpirySeconds = seconds; }
+
+    private DataIdentifier generateSafeRandomIdentifier() {
+        return new DataIdentifier(
+                String.format("%s-%d",
+                        UUID.randomUUID().toString(),
+                        Instant.now().toEpochMilli()
+                )
+        );
+    }
+
+    protected DataRecordUpload initiateHttpUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final DataRecordUploadOptions options) {
+        List<URI> uploadPartURIs = Lists.newArrayList();
+        long minPartSize = MIN_MULTIPART_UPLOAD_PART_SIZE;
+        long maxPartSize = MAX_MULTIPART_UPLOAD_PART_SIZE;
+
+        if (0L >= maxUploadSizeInBytes) {
+            throw new IllegalArgumentException("maxUploadSizeInBytes must be > 0");
+        }
+        else if (0 == maxNumberOfURIs) {
+            throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1");
+        }
+        else if (-1 > maxNumberOfURIs) {
+            throw new IllegalArgumentException("maxNumberOfURIs must either be > 0 or -1");
+        }
+        else if (maxUploadSizeInBytes > MAX_SINGLE_PUT_UPLOAD_SIZE &&
+                maxNumberOfURIs == 1) {
+            throw new IllegalArgumentException(
+                    String.format("Cannot do single-put upload with file size %d - exceeds max single-put upload size of %d",
+                            maxUploadSizeInBytes,
+                            MAX_SINGLE_PUT_UPLOAD_SIZE)
+            );
+        }
+        else if (maxUploadSizeInBytes > MAX_BINARY_UPLOAD_SIZE) {
+            throw new IllegalArgumentException(
+                    String.format("Cannot do upload with file size %d - exceeds max upload size of %d",
+                            maxUploadSizeInBytes,
+                            MAX_BINARY_UPLOAD_SIZE)
+            );
+        }
+
+        DataIdentifier newIdentifier = generateSafeRandomIdentifier();
+        String blobId = getKeyName(newIdentifier);
+        String uploadId = null;
+
+        if (httpUploadURIExpirySeconds > 0) {
+            // Always do multi-part uploads for Azure, even for small binaries.
+            //
+            // This is because Azure requires a unique header, "x-ms-blob-type=BlockBlob", to be
+            // set but only for single-put uploads, not multi-part.
+            // This would require clients to know not only the type of service provider being used
+            // but also the type of upload (single-put vs multi-part), which breaks abstraction.
+            // Instead we can insist that clients always do multi-part uploads to Azure, even
+            // if the multi-part upload consists of only one upload part. This doesn't require
+            // additional work on the part of the client since the "complete" request must always
+            // be sent regardless, but it helps us avoid the client having to know what type
+            // of provider is being used, or us having to instruct the client to use specific
+            // types of headers, etc.
+
+            // Azure doesn't use upload IDs like AWS does.
+            // Generate a fake one for compatibility - we use them to determine whether we are
+            // doing multi-part or single-put upload.
+            uploadId = Base64.encode(UUID.randomUUID().toString());
+
+            long numParts = 0L;
+            if (maxNumberOfURIs > 0) {
+                long requestedPartSize = (long) Math.ceil(((double) maxUploadSizeInBytes) / ((double) maxNumberOfURIs));
+                if (requestedPartSize <= maxPartSize) {
+                    numParts = Math.min(
+                            maxNumberOfURIs,
+                            Math.min(
+                                    (long) Math.ceil(((double) maxUploadSizeInBytes) / ((double) minPartSize)),
+                                    MAX_ALLOWABLE_UPLOAD_URIS
+                            )
+                    );
+                } else {
+                    throw new IllegalArgumentException(
+                            String.format("Cannot do multi-part upload with requested part size %d", requestedPartSize)
+                    );
+                }
+            }
+            else {
+                long maximalNumParts = (long) Math.ceil(((double) maxUploadSizeInBytes) / ((double) MIN_MULTIPART_UPLOAD_PART_SIZE));
+                numParts = Math.min(maximalNumParts, MAX_ALLOWABLE_UPLOAD_URIS);
+            }
+
+            String key = getKeyName(newIdentifier);
+            String domain = getDirectUploadBlobStorageDomain(options.isDomainOverrideIgnored());
+            if (null == domain) {
+                throw new NullPointerException("Could not determine domain for direct upload");
+            }
+
+            EnumSet<SharedAccessBlobPermissions> perms = EnumSet.of(SharedAccessBlobPermissions.WRITE);
+            Map<String, String> presignedURIRequestParams = Maps.newHashMap();
+            // see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#uri-parameters
+            presignedURIRequestParams.put("comp", "block");
+            for (long blockId = 1; blockId <= numParts; ++blockId) {
+                presignedURIRequestParams.put("blockid",
+                        Base64.encode(String.format("%06d", blockId)));
+                uploadPartURIs.add(
+                        createPresignedURI(key,
+                                perms,
+                                httpUploadURIExpirySeconds,
+                                presignedURIRequestParams,
+                                domain)
+                );
+            }
+
+            try {
+                byte[] secret = getOrCreateReferenceKey();
+                String uploadToken = new DataRecordUploadToken(blobId, uploadId).getEncodedToken(secret);
+                return new DataRecordUpload() {
+                    @Override
+                    @NotNull
+                    public String getUploadToken() {
+                        return uploadToken;
+                    }
+
+                    @Override
+                    public long getMinPartSize() {
+                        return minPartSize;
+                    }
+
+                    @Override
+                    public long getMaxPartSize() {
+                        return maxPartSize;
+                    }
+
+                    @Override
+                    @NotNull
+                    public Collection<URI> getUploadURIs() {
+                        return uploadPartURIs;
+                    }
+                };
+            } catch (DataStoreException e) {
+                LOG.warn("Unable to obtain data store key", e);
+            }
+        }
+
+        return null;
+    }
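What a remote client is expected to do with the part URIs returned above: one plain HTTP PUT per URI, with no Azure SDK and no special headers, since `comp=block`, the `blockid` and the SAS token are already encoded in each URI. A sketch, assuming the caller has pre-split the binary into one chunk per URI:

    // Hypothetical client-side sketch (not part of this patch).
    static void putParts(Collection<URI> uploadURIs, Iterator<byte[]> parts) throws IOException {
        for (URI partURI : uploadURIs) {
            HttpURLConnection conn = (HttpURLConnection) partURI.toURL().openConnection();
            conn.setRequestMethod("PUT");
            conn.setDoOutput(true);
            try (OutputStream out = conn.getOutputStream()) {
                out.write(parts.next()); // block size limits depend on the Azure service version
            }
            if (conn.getResponseCode() != 201) { // Put Block answers 201 Created on success
                throw new IOException("Block upload failed: HTTP " + conn.getResponseCode());
            }
        }
    }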
+    protected DataRecord completeHttpUpload(@NotNull String uploadTokenStr)
+            throws DataRecordUploadException, DataStoreException {
+
+        if (Strings.isNullOrEmpty(uploadTokenStr)) {
+            throw new IllegalArgumentException("uploadToken required");
+        }
+
+        DataRecordUploadToken uploadToken = DataRecordUploadToken.fromEncodedToken(uploadTokenStr, getOrCreateReferenceKey());
+        String key = uploadToken.getBlobId();
+        DataIdentifier blobId = new DataIdentifier(getIdentifierName(key));
+
+        DataRecord record = null;
+        try {
+            record = getRecord(blobId);
+            // If this succeeds this means either it was a "single put" upload
+            // (we don't need to do anything in this case - blob is already uploaded)
+            // or it was completed before with the same token.
+        }
+        catch (DataStoreException e1) {
+            // record doesn't exist - so this means we are safe to do the complete request
+            try {
+                if (uploadToken.getUploadId().isPresent()) {
+                    CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+                    // An existing upload ID means this is a multi-part upload
+                    List<BlockEntry> blocks = blob.downloadBlockList(
+                            BlockListingFilter.UNCOMMITTED,
+                            AccessCondition.generateEmptyCondition(),
+                            null,
+                            null);
+                    addLastModified(blob);
+                    blob.commitBlockList(blocks);
+                    long size = 0L;
+                    for (BlockEntry block : blocks) {
+                        size += block.getSize();
+                    }
+                    record = new AzureBlobStoreDataRecord(
+                            this,
+                            azureBlobContainerProvider,
+                            blobId,
+                            getLastModified(blob),
+                            size);
+                }
+                else {
+                    // Something is wrong - upload ID missing from upload token
+                    // but record doesn't exist already, so this is invalid
+                    throw new DataRecordUploadException(
+                            String.format("Unable to finalize direct write of binary %s - upload ID missing from upload token",
+                                    blobId)
+                    );
+                }
+            } catch (URISyntaxException | StorageException e2) {
+                throw new DataRecordUploadException(
+                        String.format("Unable to finalize direct write of binary %s", blobId),
+                        e2
+                );
+            }
+        }
+
+        return record;
+    }
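Taken together, the two methods above form a handshake. A rough end-to-end sketch, in which the `backend` instance is hypothetical, `DataRecordUploadOptions.DEFAULT` is assumed to exist, and error handling is elided:

    // Hypothetical end-to-end sketch (not part of this patch).
    DataRecordUpload upload = backend.initiateHttpUpload(
            20L * 1024 * 1024,               // announced binary size, e.g. 20 MB
            -1,                              // -1: let the backend choose the number of part URIs
            DataRecordUploadOptions.DEFAULT);
    // ... the remote client PUTs one block per URI in upload.getUploadURIs() ...
    DataRecord committed = backend.completeHttpUpload(upload.getUploadToken());
    // the commit turns the uncommitted block list into a readable blob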
+    private String getDefaultBlobStorageDomain() {
+        String accountName = properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "");
+        if (Strings.isNullOrEmpty(accountName)) {
+            LOG.warn("Can't generate presigned URI - Azure account name not found in properties");
+            return null;
+        }
+        return String.format("%s.blob.core.windows.net", accountName);
+    }
+
+    private String getDirectDownloadBlobStorageDomain(boolean ignoreDomainOverride) {
+        String domain = ignoreDomainOverride
+                ? getDefaultBlobStorageDomain()
+                : downloadDomainOverride;
+        if (Strings.isNullOrEmpty(domain)) {
+            domain = getDefaultBlobStorageDomain();
+        }
+        return domain;
+    }
+
+    private String getDirectUploadBlobStorageDomain(boolean ignoreDomainOverride) {
+        String domain = ignoreDomainOverride
+                ? getDefaultBlobStorageDomain()
+                : uploadDomainOverride;
+        if (Strings.isNullOrEmpty(domain)) {
+            domain = getDefaultBlobStorageDomain();
+        }
+        return domain;
+    }
+
+    private URI createPresignedURI(String key,
+                                   EnumSet<SharedAccessBlobPermissions> permissions,
+                                   int expirySeconds,
+                                   SharedAccessBlobHeaders optionalHeaders,
+                                   String domain) {
+        return createPresignedURI(key, permissions, expirySeconds, Maps.newHashMap(), optionalHeaders, domain);
+    }
+
+    private URI createPresignedURI(String key,
+                                   EnumSet<SharedAccessBlobPermissions> permissions,
+                                   int expirySeconds,
+                                   Map<String, String> additionalQueryParams,
+                                   String domain) {
+        return createPresignedURI(key, permissions, expirySeconds, additionalQueryParams, null, domain);
+    }
+
+    private URI createPresignedURI(String key,
+                                   EnumSet<SharedAccessBlobPermissions> permissions,
+                                   int expirySeconds,
+                                   Map<String, String> additionalQueryParams,
+                                   SharedAccessBlobHeaders optionalHeaders,
+                                   String domain) {
+        if (Strings.isNullOrEmpty(domain)) {
+            LOG.warn("Can't generate presigned URI - no Azure domain provided (is Azure account name configured?)");
+            return null;
+        }
+
+        URI presignedURI = null;
+        try {
+            String sharedAccessSignature = azureBlobContainerProvider.generateSharedAccessSignature(getBlobRequestOptions(), key,
+                    permissions, expirySeconds, optionalHeaders);
+
+            // Shared access signature is returned encoded already.
+            String uriString = String.format("https://%s/%s/%s?%s",
+                    domain,
+                    getContainerName(),
+                    key,
+                    sharedAccessSignature);
+
+            if (! additionalQueryParams.isEmpty()) {
+                StringBuilder builder = new StringBuilder();
+                for (Map.Entry<String, String> e : additionalQueryParams.entrySet()) {
+                    builder.append("&");
+                    builder.append(URLEncoder.encode(e.getKey(), StandardCharsets.UTF_8));
+                    builder.append("=");
+                    builder.append(URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8));
+                }
+                uriString += builder.toString();
+            }
+
+            presignedURI = new URI(uriString);
+        }
+        catch (DataStoreException e) {
+            LOG.error("No connection to Azure Blob Storage", e);
+        }
+        catch (URISyntaxException | InvalidKeyException e) {
+            LOG.error("Can't generate a presigned URI for key {}", key, e);
+        }
+        catch (StorageException e) {
+            LOG.error("Azure request to create presigned Azure Blob Storage {} URI failed. " +
+                            "Key: {}, Error: {}, HTTP Code: {}, Azure Error Code: {}",
+                    permissions.contains(SharedAccessBlobPermissions.READ) ? "GET" :
+                            (permissions.contains(SharedAccessBlobPermissions.WRITE) ? "PUT" : ""),
+                    key,
+                    e.getMessage(),
+                    e.getHttpStatusCode(),
+                    e.getErrorCode());
+        }
+
+        return presignedURI;
+    }
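For reference, the URI assembled above has roughly this shape; every value below is a placeholder:

    // Illustrative shape of an assembled presigned URI (all values are placeholders):
    //
    //   https://myaccount.blob.core.windows.net/oak-container/0123-456789abcdef
    //     ?sv=2019-02-02&sr=b&sp=w&se=2030-01-01T00%3A00%3A00Z&sig=...  <- SAS from generateSharedAccessSignature
    //     &comp=block&blockid=MDAwMDAx                                  <- additionalQueryParams (upload-part URIs only)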
"PUT" : ""), + key, + e.getMessage(), + e.getHttpStatusCode(), + e.getErrorCode()); + } + + return presignedURI; + } + + private static class AzureBlobInfo { + private final String name; + private final long lastModified; + private final long length; + + public AzureBlobInfo(String name, long lastModified, long length) { + this.name = name; + this.lastModified = lastModified; + this.length = length; + } + + public String getName() { + return name; + } + + public long getLastModified() { + return lastModified; + } + + public long getLength() { + return length; + } + + public static AzureBlobInfo fromCloudBlob(CloudBlob cloudBlob) throws StorageException { + cloudBlob.downloadAttributes(); + return new AzureBlobInfo(cloudBlob.getName(), + AzureBlobStoreBackendV8.getLastModified(cloudBlob), + cloudBlob.getProperties().getLength()); + } + } + + private class RecordsIterator extends AbstractIterator { + // Seems to be thread-safe (in 5.0.0) + ResultContinuation resultContinuation; + boolean firstCall = true; + final Function transformer; + final Queue items = Lists.newLinkedList(); + + public RecordsIterator (Function transformer) { + this.transformer = transformer; + } + + @Override + protected T computeNext() { + if (items.isEmpty()) { + loadItems(); + } + if (!items.isEmpty()) { + return transformer.apply(items.remove()); + } + return endOfData(); + } + + private boolean loadItems() { + long start = System.currentTimeMillis(); + ClassLoader contextClassLoader = currentThread().getContextClassLoader(); + try { + currentThread().setContextClassLoader(getClass().getClassLoader()); + + CloudBlobContainer container = azureBlobContainerProvider.getBlobContainer(); + if (!firstCall && (resultContinuation == null || !resultContinuation.hasContinuation())) { + LOG.trace("No more records in container. containerName={}", container); + return false; + } + firstCall = false; + ResultSegment results = container.listBlobsSegmented(null, false, EnumSet.noneOf(BlobListingDetails.class), null, resultContinuation, null, null); + resultContinuation = results.getContinuationToken(); + for (ListBlobItem item : results.getResults()) { + if (item instanceof CloudBlob) { + items.add(AzureBlobInfo.fromCloudBlob((CloudBlob)item)); + } + } + LOG.debug("Container records batch read. batchSize={} containerName={} duration={}", + results.getLength(), getContainerName(), (System.currentTimeMillis() - start)); + return results.getLength() > 0; + } + catch (StorageException e) { + LOG.info("Error listing blobs. containerName={}", getContainerName(), e); + } + catch (DataStoreException e) { + LOG.debug("Cannot list blobs. 
containerName={}", getContainerName(), e); + } finally { + if (contextClassLoader != null) { + currentThread().setContextClassLoader(contextClassLoader); + } + } + return false; + } + } + + static class AzureBlobStoreDataRecord extends AbstractDataRecord { + final AzureBlobContainerProviderV8 azureBlobContainerProvider; + final long lastModified; + final long length; + final boolean isMeta; + + public AzureBlobStoreDataRecord(AbstractSharedBackend backend, AzureBlobContainerProviderV8 azureBlobContainerProvider, + DataIdentifier key, long lastModified, long length) { + this(backend, azureBlobContainerProvider, key, lastModified, length, false); + } + + public AzureBlobStoreDataRecord(AbstractSharedBackend backend, AzureBlobContainerProviderV8 azureBlobContainerProvider, + DataIdentifier key, long lastModified, long length, boolean isMeta) { + super(backend, key); + this.azureBlobContainerProvider = azureBlobContainerProvider; + this.lastModified = lastModified; + this.length = length; + this.isMeta = isMeta; + } + + @Override + public long getLength() { + return length; + } + + @Override + public InputStream getStream() throws DataStoreException { + String id = getKeyName(getIdentifier()); + CloudBlobContainer container = azureBlobContainerProvider.getBlobContainer(); + if (isMeta) { + id = addMetaKeyPrefix(getIdentifier().toString()); + } + else { + // Don't worry about stream logging for metadata records + if (LOG_STREAMS_DOWNLOAD.isDebugEnabled()) { + // Log message, with exception so we can get a trace to see where the call came from + LOG_STREAMS_DOWNLOAD.debug("Binary downloaded from Azure Blob Storage - identifier={} ", id, new Exception()); + } + } + try { + return container.getBlockBlobReference(id).openInputStream(); + } catch (StorageException | URISyntaxException e) { + throw new DataStoreException(e); + } + } + + @Override + public long getLastModified() { + return lastModified; + } + + @Override + public String toString() { + return "AzureBlobStoreDataRecord{" + + "identifier=" + getIdentifier() + + ", length=" + length + + ", lastModified=" + lastModified + + ", containerName='" + Optional.ofNullable(azureBlobContainerProvider).map(AzureBlobContainerProviderV8::getContainerName).orElse(null) + '\'' + + '}'; + } + } + + private String getContainerName() { + return Optional.ofNullable(this.azureBlobContainerProvider) + .map(AzureBlobContainerProviderV8::getContainerName) + .orElse(null); + } + + @Override + public byte[] getOrCreateReferenceKey() throws DataStoreException { + try { + if (secret != null && secret.length != 0) { + return secret; + } else { + byte[] key; + // Try reading from the metadata folder if it exists + key = readMetadataBytes(REF_KEY); + if (key == null) { + key = super.getOrCreateReferenceKey(); + addMetadataRecord(new ByteArrayInputStream(key), REF_KEY); + key = readMetadataBytes(REF_KEY); + } + secret = key; + return secret; + } + } catch (IOException e) { + throw new DataStoreException("Unable to get or create key " + e); + } + } + + protected byte[] readMetadataBytes(String name) throws IOException, DataStoreException { + DataRecord rec = getMetadataRecord(name); + byte[] key = null; + if (rec != null) { + InputStream stream = null; + try { + stream = rec.getStream(); + return IOUtils.toByteArray(stream); + } finally { + IOUtils.closeQuietly(stream); + } + } + return key; + } + +} diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java 
b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java new file mode 100644 index 00000000000..d7ceca13d9c --- /dev/null +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8; + +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.RetryExponentialRetry; +import com.microsoft.azure.storage.RetryNoRetry; +import com.microsoft.azure.storage.RetryPolicy; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.microsoft.azure.storage.blob.CloudBlobClient; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import org.apache.commons.lang3.StringUtils; +import org.apache.jackrabbit.core.data.DataStoreException; +import com.google.common.base.Strings; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; +import org.apache.jackrabbit.oak.commons.PropertiesUtil; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.SocketAddress; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.Properties; + +public final class UtilsV8 { + + public static final String DEFAULT_CONFIG_FILE = "azure.properties"; + + public static final String DASH = "-"; + + /** + * private constructor so that class cannot initialized from outside. + */ + private UtilsV8() { + } + + /** + * Create CloudBlobClient from properties. 
+     *
+     * @param connectionString connection string to configure the {@link CloudBlobClient}
+     * @return {@link CloudBlobClient}
+     */
+    public static CloudBlobClient getBlobClient(@NotNull final String connectionString) throws URISyntaxException, InvalidKeyException {
+        return getBlobClient(connectionString, null);
+    }
+
+    public static CloudBlobClient getBlobClient(@NotNull final String connectionString,
+                                                @Nullable final BlobRequestOptions requestOptions) throws URISyntaxException, InvalidKeyException {
+        CloudStorageAccount account = CloudStorageAccount.parse(connectionString);
+        CloudBlobClient client = account.createCloudBlobClient();
+        if (null != requestOptions) {
+            client.setDefaultRequestOptions(requestOptions);
+        }
+        return client;
+    }
+
+    public static CloudBlobContainer getBlobContainer(@NotNull final String connectionString,
+                                                      @NotNull final String containerName) throws DataStoreException {
+        return getBlobContainer(connectionString, containerName, null);
+    }
+
+    public static CloudBlobContainer getBlobContainer(@NotNull final String connectionString,
+                                                      @NotNull final String containerName,
+                                                      @Nullable final BlobRequestOptions requestOptions) throws DataStoreException {
+        try {
+            CloudBlobClient client = (
+                    (null == requestOptions)
+                            ? UtilsV8.getBlobClient(connectionString)
+                            : UtilsV8.getBlobClient(connectionString, requestOptions)
+            );
+            return client.getContainerReference(containerName);
+        } catch (InvalidKeyException | URISyntaxException | StorageException e) {
+            throw new DataStoreException(e);
+        }
+    }
+
+    public static void setProxyIfNeeded(final Properties properties) {
+        String proxyHost = properties.getProperty(AzureConstants.PROXY_HOST);
+        String proxyPort = properties.getProperty(AzureConstants.PROXY_PORT);
+
+        if (!Strings.isNullOrEmpty(proxyHost) &&
+                !Strings.isNullOrEmpty(proxyPort)) {
+            int port = Integer.parseInt(proxyPort);
+            SocketAddress proxyAddr = new InetSocketAddress(proxyHost, port);
+            Proxy proxy = new Proxy(Proxy.Type.HTTP, proxyAddr);
+            OperationContext.setDefaultProxy(proxy);
+        }
+    }
+
+    public static String getConnectionStringFromProperties(Properties properties) {
+
+        String sasUri = properties.getProperty(AzureConstants.AZURE_SAS, "");
+        String blobEndpoint = properties.getProperty(AzureConstants.AZURE_BLOB_ENDPOINT, "");
+        String connectionString = properties.getProperty(AzureConstants.AZURE_CONNECTION_STRING, "");
+        String accountName = properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "");
+        String accountKey = properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, "");
+
+        if (!connectionString.isEmpty()) {
+            return connectionString;
+        }
+
+        if (!sasUri.isEmpty()) {
+            return getConnectionStringForSas(sasUri, blobEndpoint, accountName);
+        }
+
+        return getConnectionString(
+                accountName,
+                accountKey,
+                blobEndpoint);
+    }
+
+    public static String getConnectionStringForSas(String sasUri, String blobEndpoint, String accountName) {
+        if (StringUtils.isEmpty(blobEndpoint)) {
+            return String.format("AccountName=%s;SharedAccessSignature=%s", accountName, sasUri);
+        } else {
+            return String.format("BlobEndpoint=%s;SharedAccessSignature=%s", blobEndpoint, sasUri);
+        }
+    }
+
+    public static String getConnectionString(final String accountName, final String accountKey) {
+        return getConnectionString(accountName, accountKey, null);
+    }
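For reference, the connection strings these helpers produce; the account name, key and SAS values below are placeholders, not real credentials:

    // Illustrative outputs (placeholder values):
    String withKey = UtilsV8.getConnectionString("myaccount", "bXlrZXk=");
    // -> "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=bXlrZXk="

    String withSas = UtilsV8.getConnectionStringForSas("sv=2019-02-02&sig=...", "", "myaccount");
    // -> "AccountName=myaccount;SharedAccessSignature=sv=2019-02-02&sig=..."

    String azurite = UtilsV8.getConnectionStringForSas("sv=...", "http://127.0.0.1:10000/devstoreaccount1", null);
    // -> "BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;SharedAccessSignature=sv=..."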
StringBuilder("DefaultEndpointsProtocol=https"); + connString.append(";AccountName=").append(accountName); + connString.append(";AccountKey=").append(accountKey); + + if (!Strings.isNullOrEmpty(blobEndpoint)) { + connString.append(";BlobEndpoint=").append(blobEndpoint); + } + return connString.toString(); + } + + public static RetryPolicy getRetryPolicy(final String maxRequestRetry) { + int retries = PropertiesUtil.toInteger(maxRequestRetry, -1); + if (retries < 0) { + return null; + } + if (retries == 0) { + return new RetryNoRetry(); + } + return new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, retries); + } +} diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java index 41acca88007..c8f777f15f5 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java @@ -18,8 +18,10 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.sas.BlobSasPermission; +import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; import org.apache.jackrabbit.core.data.DataRecord; @@ -30,22 +32,17 @@ import org.junit.ClassRule; import org.junit.Test; -import java.io.IOException; -import java.net.URISyntaxException; import java.time.Duration; import java.time.Instant; +import java.time.OffsetDateTime; import java.util.Date; import java.util.EnumSet; import java.util.Properties; import java.util.Set; import java.util.stream.StreamSupport; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; import static java.util.stream.Collectors.toSet; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.META_DIR_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -61,11 +58,9 @@ public class AzureBlobStoreBackendTest { public static AzuriteDockerRule azurite = new AzuriteDockerRule(); private static final String CONTAINER_NAME = "blobstore"; - private static final EnumSet READ_ONLY = EnumSet.of(READ, LIST); - private static final EnumSet READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); private static final Set BLOBS = Set.of("blob1", "blob2"); - private CloudBlobContainer container; + private BlobContainerClient container; @After public void tearDown() throws Exception { @@ -76,8 +71,14 @@ public void tearDown() throws Exception { @Test public void initWithSharedAccessSignature_readOnly() throws Exception { - CloudBlobContainer container = 
createBlobContainer(); - String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null); + BlobContainerClient container = createBlobContainer(); + OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(7); + BlobSasPermission permissions = new BlobSasPermission().setReadPermission(true) + .setWritePermission(false) + .setListPermission(true); + + BlobServiceSasSignatureValues sasValues = new BlobServiceSasSignatureValues(expiryTime, permissions); + String sasToken = container.generateSas(sasValues); AzureBlobStoreBackend azureBlobStoreBackend = new AzureBlobStoreBackend(); azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken)); @@ -90,8 +91,16 @@ public void initWithSharedAccessSignature_readOnly() throws Exception { @Test public void initWithSharedAccessSignature_readWrite() throws Exception { - CloudBlobContainer container = createBlobContainer(); - String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null); + BlobContainerClient container = createBlobContainer(); + OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(7); + BlobSasPermission permissions = new BlobSasPermission().setReadPermission(true) + .setListPermission(true) + .setAddPermission(true) + .setCreatePermission(true) + .setWritePermission(true); + + BlobServiceSasSignatureValues sasValues = new BlobServiceSasSignatureValues(expiryTime, permissions); + String sasToken = container.generateSas(sasValues); AzureBlobStoreBackend azureBlobStoreBackend = new AzureBlobStoreBackend(); azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken)); @@ -104,9 +113,14 @@ public void initWithSharedAccessSignature_readWrite() throws Exception { @Test public void connectWithSharedAccessSignatureURL_expired() throws Exception { - CloudBlobContainer container = createBlobContainer(); - SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday()); - String sasToken = container.generateSharedAccessSignature(expiredPolicy, null); + BlobContainerClient container = createBlobContainer(); + + OffsetDateTime expiryTime = OffsetDateTime.now().minusDays(1); + BlobSasPermission permissions = new BlobSasPermission().setReadPermission(true) + .setWritePermission(true); + + BlobServiceSasSignatureValues sasValues = new BlobServiceSasSignatureValues(expiryTime, permissions); + String sasToken = container.generateSas(sasValues); AzureBlobStoreBackend azureBlobStoreBackend = new AzureBlobStoreBackend(); azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken)); @@ -186,10 +200,10 @@ private String getEnvironmentVariable(String variableName) { return System.getenv(variableName); } - private CloudBlobContainer createBlobContainer() throws Exception { - container = azurite.getContainer("blobstore"); + private BlobContainerClient createBlobContainer() throws Exception { + container = azurite.getContainer("blobstore", getConnectionString()); for (String blob : BLOBS) { - container.getBlockBlobReference(blob + ".txt").uploadText(blob); + container.getBlobClient(blob + ".txt").upload(BinaryData.fromString(blob), true); } return container; } @@ -239,11 +253,10 @@ private static SharedAccessBlobPolicy policy(EnumSet expectedBlobs) throws Exception { - CloudBlobContainer container = backend.getAzureContainer(); + BlobContainerClient container = backend.getAzureContainer(); Set actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false) - .map(blob -> blob.getUri().getPath()) - .map(path -> path.substring(path.lastIndexOf('/') + 
1)) - .filter(path -> !path.isEmpty()) + .map(blobItem -> container.getBlobClient(blobItem.getName()).getBlobName()) + .filter(name -> !name.contains(META_DIR_NAME)) .collect(toSet()); Set expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet()); @@ -253,8 +266,8 @@ private static void assertReadAccessGranted(AzureBlobStoreBackend backend, Set actualBlobContent = actualBlobNames.stream() .map(name -> { try { - return container.getBlockBlobReference(name).downloadText(); - } catch (StorageException | IOException | URISyntaxException e) { + return container.getBlobClient(name).getBlockBlobClient().downloadContent().toString(); + } catch (Exception e) { throw new RuntimeException("Error while reading blob " + name, e); } }) @@ -264,7 +277,8 @@ private static void assertReadAccessGranted(AzureBlobStoreBackend backend, Set 0); } diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java index d07f483ffe5..fe618e785ba 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java @@ -38,6 +38,7 @@ import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils.getAzureConfig; import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils.getAzureDataStore; import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils.isAzureConfigured; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_MULTIPART_UPLOAD_PART_SIZE; import static org.junit.Assume.assumeTrue; /** @@ -76,7 +77,7 @@ protected void doDeleteRecord(DataStore ds, DataIdentifier identifier) throws Da @Override protected long getProviderMaxPartSize() { - return AzureBlobStoreBackend.MAX_MULTIPART_UPLOAD_PART_SIZE; + return MAX_MULTIPART_UPLOAD_PART_SIZE; } @Override diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java index 85b0ede09ff..ea001c4db33 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java @@ -18,6 +18,10 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_BINARY_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_MULTIPART_UPLOAD_PART_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MAX_SINGLE_PUT_UPLOAD_SIZE; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Constants.MIN_MULTIPART_UPLOAD_PART_SIZE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -93,19 +97,19 @@ protected void doDeleteRecord(DataStore ds, DataIdentifier identifier) throws Da @Override protected long getProviderMinPartSize() { - 
return Math.max(0L, AzureBlobStoreBackend.MIN_MULTIPART_UPLOAD_PART_SIZE); + return Math.max(0L, MIN_MULTIPART_UPLOAD_PART_SIZE); } @Override protected long getProviderMaxPartSize() { - return AzureBlobStoreBackend.MAX_MULTIPART_UPLOAD_PART_SIZE; + return MAX_MULTIPART_UPLOAD_PART_SIZE; } @Override - protected long getProviderMaxSinglePutSize() { return AzureBlobStoreBackend.MAX_SINGLE_PUT_UPLOAD_SIZE; } + protected long getProviderMaxSinglePutSize() { return MAX_SINGLE_PUT_UPLOAD_SIZE; } @Override - protected long getProviderMaxBinaryUploadSize() { return AzureBlobStoreBackend.MAX_BINARY_UPLOAD_SIZE; } + protected long getProviderMaxBinaryUploadSize() { return MAX_BINARY_UPLOAD_SIZE; } @Override protected boolean isSinglePutURI(URI uri) { diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreIT.java similarity index 97% rename from oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java rename to oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreIT.java index 632e3c33458..25b81a57d5a 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreIT.java @@ -18,6 +18,7 @@ import static org.apache.commons.codec.binary.Hex.encodeHexString; import static org.apache.commons.io.FileUtils.copyInputStreamToFile; +import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils.isAzureConfigured; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -76,12 +77,12 @@ /** * Test {@link AzureDataStore} with AzureDataStore and local cache on. * It requires to pass azure config file via system property or system properties by prefixing with 'ds.'. - * See details @ {@link AzureDataStoreUtils}. + * See details @ {@link TestAzureDataStoreUtils}. * For e.g. -Dconfig=/opt/cq/azure.properties. 
Sample azure properties located at * src/test/resources/azure.properties */ -public class AzureDataStoreTest { - protected static final Logger LOG = LoggerFactory.getLogger(AzureDataStoreTest.class); +public class AzureDataStoreIT { + protected static final Logger LOG = LoggerFactory.getLogger(AzureDataStoreIT.class); @Rule public TemporaryFolder folder = new TemporaryFolder(new File("target")); @@ -89,28 +90,36 @@ public class AzureDataStoreTest { private Properties props; private static byte[] testBuffer = "test".getBytes(); private AzureDataStore ds; - private AzureBlobStoreBackend backend; + private AbstractAzureBlobStoreBackend backend; private String container; Random randomGen = new Random(); + public AzureDataStoreIT() {} + @BeforeClass public static void assumptions() { - assumeTrue(AzureDataStoreUtils.isAzureConfigured()); + assumeTrue(isAzureConfigured()); } @Before - public void setup() throws IOException, RepositoryException, URISyntaxException, InvalidKeyException, StorageException { + public void setup() throws IOException, RepositoryException, URISyntaxException, InvalidKeyException, StorageException, NoSuchFieldException { + + System.setProperty("blob.azure.v12.enabled", "true"); props = AzureDataStoreUtils.getAzureConfig(); - container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999)) + container = randomGen.nextInt(9999) + "-" + randomGen.nextInt(9999) + "-test"; props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container); + props.setProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_NAME); + props.setProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, AzuriteDockerRule.ACCOUNT_KEY); ds = new AzureDataStore(); + ds.setProperties(props); ds.setCacheSize(0); // Turn caching off so we don't get weird test results due to caching ds.init(folder.newFolder().getAbsolutePath()); backend = (AzureBlobStoreBackend) ds.getBackend(); + } @After @@ -121,64 +130,10 @@ public void teardown() throws InvalidKeyException, URISyntaxException, StorageEx } catch (Exception ignore) {} } - private void validateRecord(final DataRecord record, - final String contents, - final DataRecord rhs) - throws DataStoreException, IOException { - validateRecord(record, contents, rhs.getIdentifier(), rhs.getLength(), rhs.getLastModified()); - } - - private void validateRecord(final DataRecord record, - final String contents, - final DataIdentifier identifier, - final long length, - final long lastModified) - throws DataStoreException, IOException { - validateRecord(record, contents, identifier, length, lastModified, true); - } - - private void validateRecord(final DataRecord record, - final String contents, - final DataIdentifier identifier, - final long length, - final long lastModified, - final boolean lastModifiedEquals) - throws DataStoreException, IOException { - assertEquals(record.getLength(), length); - if (lastModifiedEquals) { - assertEquals(record.getLastModified(), lastModified); - } else { - assertTrue(record.getLastModified() > lastModified); - } - assertTrue(record.getIdentifier().toString().equals(identifier.toString())); - StringWriter writer = new StringWriter(); - org.apache.commons.io.IOUtils.copy(record.getStream(), writer, "utf-8"); - assertTrue(writer.toString().equals(contents)); - } - - private static InputStream randomStream(int seed, int size) { - Random r = new Random(seed); - byte[] data = new byte[size]; - r.nextBytes(data); - return new ByteArrayInputStream(data); - } - - private static String 
getIdForInputStream(final InputStream in) - throws NoSuchAlgorithmException, IOException { - MessageDigest digest = MessageDigest.getInstance("SHA-1"); - OutputStream output = new DigestOutputStream(new NullOutputStream(), digest); - try { - IOUtils.copyLarge(in, output); - } finally { - IOUtils.closeQuietly(output); - IOUtils.closeQuietly(in); - } - return encodeHexString(digest.digest()); - } - - @Test public void testCreateAndDeleteBlobHappyPath() throws DataStoreException, IOException { + assumeTrue(isAzureConfigured()); + final DataRecord uploadedRecord = ds.addRecord(new ByteArrayInputStream(testBuffer)); DataIdentifier identifier = uploadedRecord.getIdentifier(); assertTrue(backend.exists(identifier)); @@ -192,7 +147,6 @@ public void testCreateAndDeleteBlobHappyPath() throws DataStoreException, IOExce assertFalse(backend.exists(uploadedRecord.getIdentifier())); } - @Test public void testCreateAndReUploadBlob() throws DataStoreException, IOException { final DataRecord createdRecord = ds.addRecord(new ByteArrayInputStream(testBuffer)); @@ -744,4 +698,59 @@ public void testSecret() throws Exception { assertTrue("refKey in memory not equal to the metadata record", Arrays.equals(refKey, refDirectFromBackend)); } + + private void validateRecord(final DataRecord record, + final String contents, + final DataRecord rhs) + throws DataStoreException, IOException { + validateRecord(record, contents, rhs.getIdentifier(), rhs.getLength(), rhs.getLastModified()); + } + + private void validateRecord(final DataRecord record, + final String contents, + final DataIdentifier identifier, + final long length, + final long lastModified) + throws DataStoreException, IOException { + validateRecord(record, contents, identifier, length, lastModified, true); + } + + private void validateRecord(final DataRecord record, + final String contents, + final DataIdentifier identifier, + final long length, + final long lastModified, + final boolean lastModifiedEquals) + throws DataStoreException, IOException { + assertEquals(record.getLength(), length); + if (lastModifiedEquals) { + assertEquals(record.getLastModified(), lastModified); + } else { + assertTrue(record.getLastModified() > lastModified); + } + assertTrue(record.getIdentifier().toString().equals(identifier.toString())); + StringWriter writer = new StringWriter(); + org.apache.commons.io.IOUtils.copy(record.getStream(), writer, "utf-8"); + assertTrue(writer.toString().equals(contents)); + } + + private static InputStream randomStream(int seed, int size) { + Random r = new Random(seed); + byte[] data = new byte[size]; + r.nextBytes(data); + return new ByteArrayInputStream(data); + } + + private static String getIdForInputStream(final InputStream in) + throws NoSuchAlgorithmException, IOException { + MessageDigest digest = MessageDigest.getInstance("SHA-1"); + OutputStream output = new DigestOutputStream(new NullOutputStream(), digest); + try { + IOUtils.copyLarge(in, output); + } finally { + IOUtils.closeQuietly(output); + IOUtils.closeQuietly(in); + } + return encodeHexString(digest.digest()); + } } diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java index 7c0af90f94b..398ecffa86a 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java +++ 
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
index 7c0af90f94b..398ecffa86a 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
@@ -33,9 +33,9 @@
 
 import javax.net.ssl.HttpsURLConnection;
 
+import com.azure.storage.blob.BlobContainerClient;
 import org.apache.jackrabbit.guava.common.base.Strings;
 import org.apache.jackrabbit.guava.common.collect.Maps;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
 import org.apache.commons.io.IOUtils;
 import org.apache.jackrabbit.core.data.DataStore;
 import org.apache.jackrabbit.oak.commons.PropertiesUtil;
@@ -114,6 +114,8 @@ public static Properties getAzureConfig() {
             props = new Properties();
             props.putAll(filtered);
         }
+
+        props.setProperty("blob.azure.v12.enabled", "true");
         return props;
     }
 
@@ -142,12 +144,12 @@ public static <T extends DataStore> T setupDirectAccessDataStore(
             @Nullable final Properties overrideProperties)
             throws Exception {
         assumeTrue(isAzureConfigured());
-        DataStore ds = (T) getAzureDataStore(getDirectAccessDataStoreProperties(overrideProperties), homeDir.newFolder().getAbsolutePath());
+        T ds = (T) getAzureDataStore(getDirectAccessDataStoreProperties(overrideProperties), homeDir.newFolder().getAbsolutePath());
         if (ds instanceof ConfigurableDataRecordAccessProvider) {
             ((ConfigurableDataRecordAccessProvider) ds).setDirectDownloadURIExpirySeconds(directDownloadExpirySeconds);
             ((ConfigurableDataRecordAccessProvider) ds).setDirectUploadURIExpirySeconds(directUploadExpirySeconds);
         }
-        return (T) ds;
+        return ds;
     }
 
     public static Properties getDirectAccessDataStoreProperties() {
@@ -160,7 +162,6 @@ public static Properties getDirectAccessDataStoreProperties(@Nullable final Prop
         if (null != overrideProperties) {
             mergedProperties.putAll(overrideProperties);
         }
-
         // set properties needed for direct access testing
         if (null == mergedProperties.getProperty("cacheSize", null)) {
             mergedProperties.put("cacheSize", "0");
@@ -179,7 +180,7 @@ public static void deleteContainer(String containerName) throws Exception {
         try (AzureBlobContainerProvider azureBlobContainerProvider = AzureBlobContainerProvider.Builder.builder(containerName)
                 .initializeWithProperties(props)
                 .build()) {
-            CloudBlobContainer container = azureBlobContainerProvider.getBlobContainer();
+            BlobContainerClient container = azureBlobContainerProvider.getBlobContainer();
             boolean result = container.deleteIfExists();
             log.info("Container deleted. containerName={} existed={}", containerName, result);
         }
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzuriteDockerRule.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzuriteDockerRule.java
index cb709aca293..04003156c41 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzuriteDockerRule.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzuriteDockerRule.java
@@ -16,6 +16,9 @@
  */
 package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
 
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.BlobServiceClient;
+import com.azure.storage.blob.BlobServiceClientBuilder;
 import com.microsoft.azure.storage.CloudStorageAccount;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlobClient;
@@ -38,7 +41,7 @@
 
 public class AzuriteDockerRule extends ExternalResource {
 
-    private static final DockerImageName DOCKER_IMAGE_NAME = DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:3.29.0");
+    private static final DockerImageName DOCKER_IMAGE_NAME = DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:3.31.0");
     public static final String ACCOUNT_KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==";
     public static final String ACCOUNT_NAME = "devstoreaccount1";
     private static final AtomicReference<Exception> STARTUP_EXCEPTION = new AtomicReference<>();
@@ -109,6 +112,17 @@ public CloudBlobContainer getContainer(String name) throws URISyntaxException, S
         return container;
     }
 
+    public BlobContainerClient getContainer(String containerName, String connectionString) {
+        BlobServiceClient blobServiceClient = new BlobServiceClientBuilder()
+                .connectionString(connectionString)
+                .buildClient();
+
+        BlobContainerClient blobContainerClient = blobServiceClient.getBlobContainerClient(containerName);
+        blobContainerClient.deleteIfExists();
+        blobContainerClient.create();
+        return blobContainerClient;
+    }
+
     public CloudStorageAccount getCloudStorageAccount() throws URISyntaxException, InvalidKeyException {
         String blobEndpoint = "BlobEndpoint=" + getBlobEndpoint();
         String accountName = "AccountName=" + ACCOUNT_NAME;
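The new overload builds a v12 BlobContainerClient straight from a connection string, in contrast to the v8 CloudStorageAccount path kept below it. A hedged usage sketch, using the standard Azurite connection-string layout and the rule's existing getBlobEndpoint() accessor:

import com.azure.storage.blob.BlobContainerClient;

final class AzuriteV12Sketch {
    // Creates a fresh, empty container against the Azurite instance managed by the rule.
    static BlobContainerClient freshContainer(AzuriteDockerRule azurite, String name) {
        String connectionString = String.format(
                "DefaultEndpointsProtocol=http;AccountName=%s;AccountKey=%s;BlobEndpoint=%s;",
                AzuriteDockerRule.ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_KEY, azurite.getBlobEndpoint());
        // the overload deletes any stale container and recreates it, so each test starts clean
        return azurite.getContainer(name, connectionString);
    }
}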
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
index 14eca1b94a6..aa5aee68b10 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
@@ -27,8 +27,6 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.util.Properties;
@@ -38,13 +36,12 @@
 /**
  * Test {@link AzureDataStore} with AzureDataStore and local cache on.
  * It requires to pass azure config file via system property or system properties by prefixing with 'ds.'.
- * See details @ {@link AzureDataStoreUtils}.
+ * See details @ {@link TestAzureDataStoreUtils}.
  * For e.g. -Dconfig=/opt/cq/azure.properties. Sample azure properties located at
  * src/test/resources/azure.properties
  */
 public class TestAzureDS extends AbstractDataStoreTest {
 
-  protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDS.class);
   protected Properties props = new Properties();
   protected String container;
@@ -57,7 +54,7 @@ public static void assumptions() {
   @Before
   public void setUp() throws Exception {
     props.putAll(AzureDataStoreUtils.getAzureConfig());
-    container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999))
+    container = randomGen.nextInt(9999) + "-" + randomGen.nextInt(9999)
                 + "-test";
     props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container);
     props.setProperty("secret", "123456");
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
index 79072ddd759..159162dec1c 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
@@ -26,7 +26,7 @@
  * Test {@link CachingDataStore} with AzureBlobStoreBackend and with very small size (@link
  * {@link LocalCache}.
  * It requires to pass azure config file via system property or system properties by prefixing with 'ds.'.
- * See details @ {@link AzureDataStoreUtils}.
+ * See details @ {@link TestAzureDataStoreUtils}.
 * For e.g. -Dconfig=/opt/cq/azure.properties. Sample azure properties located at
 * src/test/resources/azure.properties
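The three TestAzureDS* suites differ only in how the local cache is configured. A hedged sketch of the knobs involved; the "cacheSize" and "secret" property names appear elsewhere in this patch, while the 16 MB figure is illustrative only:

import java.util.Properties;
import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;

final class CacheConfigSketch {
    // Properties for a "small cache" run: a tiny LocalCache forces frequent eviction.
    static Properties smallCacheProps(Properties azureConfig, String containerName) {
        Properties props = new Properties();
        props.putAll(azureConfig);
        props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, containerName);
        props.setProperty("cacheSize", String.valueOf(16 * 1024 * 1024)); // small; "0" disables caching
        props.setProperty("secret", "123456"); // reference secret, as in TestAzureDS#setUp
        return props;
    }
}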
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
index be31b578231..8396f6d41de 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
@@ -24,7 +24,7 @@
 * Test {@link org.apache.jackrabbit.core.data.CachingDataStore} with AzureBlobStoreBackend
 * and local cache Off.
 * It requires to pass azure config file via system property or system properties by prefixing with 'ds.'.
- * See details @ {@link AzureDataStoreUtils}.
+ * See details @ {@link TestAzureDataStoreUtils}.
 * For e.g. -Dconfig=/opt/cq/azure.properties. Sample azure properties located at
 * src/test/resources/azure.properties
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java
index 5776fbbd379..695a4e2ab06 100644
--- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java
@@ -16,14 +16,30 @@
  */
 package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
 
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.common.policy.RequestRetryOptions;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileWriter;
+import java.io.IOException;
 import java.util.Properties;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 
 public class UtilsTest {
 
+    @Rule
+    public TemporaryFolder folder = new TemporaryFolder();
+
     @Test
     public void testConnectionStringIsBasedOnProperty() {
         Properties properties = new Properties();
@@ -77,5 +93,60 @@ public void testConnectionStringSASIsPriority() {
                 String.format("BlobEndpoint=%s;SharedAccessSignature=%s", "endpoint", "sas"));
     }
 
+    @Test
+    public void testReadConfig() throws IOException {
+        File tempFile = folder.newFile("test.properties");
+        try (FileWriter writer = new FileWriter(tempFile)) {
+            writer.write("key1=value1\n");
+            writer.write("key2=value2\n");
+        }
+
+        Properties properties = Utils.readConfig(tempFile.getAbsolutePath());
+        assertEquals("value1", properties.getProperty("key1"));
+        assertEquals("value2", properties.getProperty("key2"));
+    }
+
+    @Test
+    public void testReadConfig_exception() {
+        assertThrows(IOException.class, () -> Utils.readConfig("non-existent-file"));
+    }
+
+    @Test
+    public void testGetBlobContainer() throws IOException, DataStoreException {
+        File tempFile = folder.newFile("azure.properties");
+        try (FileWriter writer = new FileWriter(tempFile)) {
+            writer.write("proxyHost=127.0.0.1\n");
+            writer.write("proxyPort=8888\n");
+        }
+
+        Properties properties = new Properties();
+        properties.load(new FileInputStream(tempFile));
+
+        String connectionString = Utils.getConnectionString(AzuriteDockerRule.ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_KEY, "http://127.0.0.1:10000/devstoreaccount1");
+        String containerName = "test-container";
+        RequestRetryOptions retryOptions = Utils.getRetryOptions("3", 3, null);
+
+        BlobContainerClient containerClient = Utils.getBlobContainer(connectionString, containerName, retryOptions, properties);
+        assertNotNull(containerClient);
+    }
+
+    @Test
+    public void testGetRetryOptions() {
+        RequestRetryOptions retryOptions = Utils.getRetryOptions("3", 3, null);
+        assertNotNull(retryOptions);
+        assertEquals(3, retryOptions.getMaxTries());
+    }
+
+    @Test
+    public void testGetRetryOptionsNoRetry() {
+        RequestRetryOptions retryOptions = Utils.getRetryOptions("0", 3, null);
+        assertNotNull(retryOptions);
+        assertEquals(1, retryOptions.getMaxTries());
+    }
+
+    @Test
+    public void testGetRetryOptionsInvalid() {
+        RequestRetryOptions retryOptions = Utils.getRetryOptions("-1", 3, null);
+        assertNull(retryOptions);
+    }
 }
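The three retry tests above pin down the observable mapping of Utils.getRetryOptions onto the v12 SDK's RequestRetryOptions. A hedged sketch of that mapping; Oak's real implementation may pick a different policy type or delays, and only the tested behaviour is taken from this patch:

import com.azure.storage.common.policy.RequestRetryOptions;
import com.azure.storage.common.policy.RetryPolicyType;

final class RetrySketch {
    // "3" -> 3 tries, "0" -> a single try (maxTries counts attempts, not retries),
    // and a negative value -> null, so callers fall back to the SDK defaults.
    static RequestRetryOptions retryOptionsSketch(String maxRetries, int tryTimeoutSeconds, String secondaryHost) {
        int retries = Integer.parseInt(maxRetries);
        if (retries < 0) {
            return null; // "-1" -> null, as testGetRetryOptionsInvalid expects
        }
        int maxTries = Math.max(1, retries);
        return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, maxTries,
                tryTimeoutSeconds, null, null, secondaryHost); // null delays = SDK defaults
    }
}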
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java
new file mode 100644
index 00000000000..5b676c02607
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java
@@ -0,0 +1,293 @@
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8;
+
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.guava.common.collect.ImmutableSet;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.stream.StreamSupport;
+
+import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD;
+import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE;
+import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST;
+import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ;
+import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE;
+import static java.util.stream.Collectors.toSet;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+
+public class AzureBlobContainerProviderV8Test {
+
+    private static final String AZURE_ACCOUNT_NAME = "AZURE_ACCOUNT_NAME";
+    private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
+    private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
+    private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
+
+    @ClassRule
+    public static AzuriteDockerRule azurite = new AzuriteDockerRule();
+
+    private static final String CONTAINER_NAME = "blobstore";
+    private static final EnumSet<SharedAccessBlobPermissions> READ_ONLY = EnumSet.of(READ, LIST);
+    private static final EnumSet<SharedAccessBlobPermissions> READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD);
+    private static final Set<String> BLOBS = Set.of("blob1", "blob2");
+
+    private CloudBlobContainer container;
+
+    @After
+    public void tearDown() throws Exception {
+        if (container != null) {
+            container.deleteIfExists();
+        }
+    }
+
+    @Test
+    public void initWithSharedAccessSignature_readOnly() throws Exception {
+        CloudBlobContainer container = createBlobContainer();
+        String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null);
+
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken));
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessNotGranted(azureBlobStoreBackend);
+        assertReadAccessGranted(azureBlobStoreBackend, BLOBS);
+    }
+
+    @Test
+    public void initWithSharedAccessSignature_readWrite() throws Exception {
+        CloudBlobContainer container = createBlobContainer();
+        String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null);
+
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken));
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessGranted(azureBlobStoreBackend, "file");
+        assertReadAccessGranted(azureBlobStoreBackend, concat(BLOBS, "file"));
+    }
+
+    @Test
+    public void connectWithSharedAccessSignatureURL_expired() throws Exception {
+        CloudBlobContainer container = createBlobContainer();
+        SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday());
+        String sasToken = container.generateSharedAccessSignature(expiredPolicy, null);
+
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithSasToken(sasToken));
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessNotGranted(azureBlobStoreBackend);
+        assertReadAccessNotGranted(azureBlobStoreBackend);
+    }
+
+    @Test
+    public void initWithAccessKey() throws Exception {
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithAccessKey());
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessGranted(azureBlobStoreBackend, "file");
+        assertReadAccessGranted(azureBlobStoreBackend, Set.of("file"));
+    }
+
+    @Test
+    public void initWithConnectionURL() throws Exception {
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithConnectionString());
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessGranted(azureBlobStoreBackend, "file");
+        assertReadAccessGranted(azureBlobStoreBackend, Set.of("file"));
+    }
+
+    @Test
+    public void initSecret() throws Exception {
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getConfigurationWithConnectionString());
+
+        azureBlobStoreBackend.init();
+        assertReferenceSecret(azureBlobStoreBackend);
+    }
+
+    /* Make sure that blob1.txt and blob2.txt are uploaded to the AZURE_ACCOUNT_NAME/blobstore
+     * container before executing this test.
+     */
+    @Test
+    public void initWithServicePrincipals() throws Exception {
+        assumeNotNull(getEnvironmentVariable(AZURE_ACCOUNT_NAME));
+        assumeNotNull(getEnvironmentVariable(AZURE_TENANT_ID));
+        assumeNotNull(getEnvironmentVariable(AZURE_CLIENT_ID));
+        assumeNotNull(getEnvironmentVariable(AZURE_CLIENT_SECRET));
+
+        AzureBlobStoreBackendV8 azureBlobStoreBackend = new AzureBlobStoreBackendV8();
+        azureBlobStoreBackend.setProperties(getPropertiesWithServicePrincipals());
+
+        azureBlobStoreBackend.init();
+
+        assertWriteAccessGranted(azureBlobStoreBackend, "test");
+        assertReadAccessGranted(azureBlobStoreBackend, concat(BLOBS, "test"));
+    }
+
+    private Properties getPropertiesWithServicePrincipals() {
+        final String accountName = getEnvironmentVariable(AZURE_ACCOUNT_NAME);
+        final String tenantId = getEnvironmentVariable(AZURE_TENANT_ID);
+        final String clientId = getEnvironmentVariable(AZURE_CLIENT_ID);
+        final String clientSecret = getEnvironmentVariable(AZURE_CLIENT_SECRET);
+
+        Properties properties = new Properties();
+        properties.setProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, accountName);
+        properties.setProperty(AzureConstants.AZURE_TENANT_ID, tenantId);
+        properties.setProperty(AzureConstants.AZURE_CLIENT_ID, clientId);
+        properties.setProperty(AzureConstants.AZURE_CLIENT_SECRET, clientSecret);
+        properties.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, CONTAINER_NAME);
+        return properties;
+    }
+
+    private String getEnvironmentVariable(String variableName) {
+        return System.getenv(variableName);
+    }
+
+    private CloudBlobContainer createBlobContainer() throws Exception {
+        container = azurite.getContainer("blobstore");
+        for (String blob : BLOBS) {
+            container.getBlockBlobReference(blob + ".txt").uploadText(blob);
+        }
+        return container;
+    }
+
+    private static Properties getConfigurationWithSasToken(String sasToken) {
+        Properties properties = getBasicConfiguration();
+        properties.setProperty(AzureConstants.AZURE_SAS, sasToken);
+        properties.setProperty(AzureConstants.AZURE_CREATE_CONTAINER, "false");
+        properties.setProperty(AzureConstants.AZURE_REF_ON_INIT, "false");
+        return properties;
+    }
+
+    private static Properties getConfigurationWithAccessKey() {
+        Properties properties = getBasicConfiguration();
+        properties.setProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, AzuriteDockerRule.ACCOUNT_KEY);
+        return properties;
+    }
+
+    @NotNull
+    private static Properties getConfigurationWithConnectionString() {
+        Properties properties = getBasicConfiguration();
+        properties.setProperty(AzureConstants.AZURE_CONNECTION_STRING, getConnectionString());
+        return properties;
+    }
+
+    @NotNull
+    private static Properties getBasicConfiguration() {
+        Properties properties = new Properties();
+        properties.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, CONTAINER_NAME);
+        properties.setProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_NAME);
+        properties.setProperty(AzureConstants.AZURE_BLOB_ENDPOINT, azurite.getBlobEndpoint());
+        properties.setProperty(AzureConstants.AZURE_CREATE_CONTAINER, "");
+        return properties;
+    }
+
+    @NotNull
+    private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions, Instant expirationTime) {
+        SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy();
+        sharedAccessBlobPolicy.setPermissions(permissions);
+        sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime));
+        return sharedAccessBlobPolicy;
+    }
+
+    @NotNull
+    private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions) {
+        return policy(permissions, Instant.now().plus(Duration.ofDays(7)));
+    }
+
+    private static void assertReadAccessGranted(AzureBlobStoreBackendV8 backend, Set<String> expectedBlobs) throws Exception {
+        CloudBlobContainer container = backend.getAzureContainer();
+        Set<String> actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false)
+                .map(blob -> blob.getUri().getPath())
+                .map(path -> path.substring(path.lastIndexOf('/') + 1))
+                .filter(path -> !path.isEmpty())
+                .collect(toSet());
+
+        Set<String> expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet());
+
+        assertEquals(expectedBlobNames, actualBlobNames);
+
+        Set<String> actualBlobContent = actualBlobNames.stream()
+                .map(name -> {
+                    try {
+                        return container.getBlockBlobReference(name).downloadText();
+                    } catch (StorageException | IOException | URISyntaxException e) {
+                        throw new RuntimeException("Error while reading blob " + name, e);
+                    }
+                })
+                .collect(toSet());
+        assertEquals(expectedBlobs, actualBlobContent);
+    }
+
+    private static void assertWriteAccessGranted(AzureBlobStoreBackendV8 backend, String blob) throws Exception {
+        backend.getAzureContainer()
+                .getBlockBlobReference(blob + ".txt").uploadText(blob);
+    }
+
+    private static void assertWriteAccessNotGranted(AzureBlobStoreBackendV8 backend) {
+        try {
+            assertWriteAccessGranted(backend, "test.txt");
+            fail("Write access should not be granted, but writing to the storage succeeded.");
+        } catch (Exception e) {
+            // successful
+        }
+    }
+
+    private static void assertReadAccessNotGranted(AzureBlobStoreBackendV8 backend) {
+        try {
+            assertReadAccessGranted(backend, BLOBS);
+            fail("Read access should not be granted, but reading from the storage succeeded.");
+        } catch (Exception e) {
+            // successful
+        }
+    }
+
+    private static Instant yesterday() {
+        return Instant.now().minus(Duration.ofDays(1));
+    }
+
+    private static Set<String> concat(Set<String> set, String element) {
+        return ImmutableSet.<String>builder().addAll(set).add(element).build();
+    }
+
+    private static String getConnectionString() {
+        return UtilsV8.getConnectionString(AzuriteDockerRule.ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_KEY, azurite.getBlobEndpoint());
+    }
+
+    private static void assertReferenceSecret(AzureBlobStoreBackendV8 azureBlobStoreBackend)
+            throws DataStoreException, IOException {
+        // assert secret already created on init
+        DataRecord refRec = azureBlobStoreBackend.getMetadataRecord("reference.key");
+        assertNotNull("Reference data record null", refRec);
+        assertTrue("reference key is empty", refRec.getLength() > 0);
+    }
+
+}
\ No newline at end of file
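For contrast with the v8 SharedAccessBlobPolicy helper used in the test above, here is a hedged sketch of the v12 equivalent, built from the SAS types the new AzureBlobContainerProvider imports. The helper name is illustrative and the production code path in this patch may differ:

import java.time.OffsetDateTime;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.sas.BlobContainerSasPermission;
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;

final class V12SasSketch {
    // Container-level read-only SAS, mirroring the v8 READ_ONLY = EnumSet.of(READ, LIST).
    static String readOnlySas(BlobContainerClient container) {
        BlobContainerSasPermission permissions = new BlobContainerSasPermission()
                .setReadPermission(true)
                .setListPermission(true);
        BlobServiceSasSignatureValues values =
                new BlobServiceSasSignatureValues(OffsetDateTime.now().plusDays(7), permissions);
        // generateSas signs with the credential the client was built with (e.g. shared key)
        return container.generateSas(values);
    }
}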
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java
new file mode 100644
index 00000000000..a1ece06c5de
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java
@@ -0,0 +1,65 @@
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8;
+
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.junit.Test;
+
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+
+public class UtilsV8Test {
+
+    @Test
+    public void testConnectionStringIsBasedOnProperty() {
+        Properties properties = new Properties();
+        properties.put(AzureConstants.AZURE_CONNECTION_STRING, "DefaultEndpointsProtocol=https;AccountName=accountName;AccountKey=accountKey");
+        String connectionString = UtilsV8.getConnectionStringFromProperties(properties);
+        assertEquals(connectionString, "DefaultEndpointsProtocol=https;AccountName=accountName;AccountKey=accountKey");
+    }
+
+    @Test
+    public void testConnectionStringIsBasedOnSAS() {
+        Properties properties = new Properties();
+        properties.put(AzureConstants.AZURE_SAS, "sas");
+        properties.put(AzureConstants.AZURE_BLOB_ENDPOINT, "endpoint");
+        String connectionString = UtilsV8.getConnectionStringFromProperties(properties);
+        assertEquals(connectionString,
+                String.format("BlobEndpoint=%s;SharedAccessSignature=%s", "endpoint", "sas"));
+    }
+
+    @Test
+    public void testConnectionStringIsBasedOnSASWithoutEndpoint() {
+        Properties properties = new Properties();
+        properties.put(AzureConstants.AZURE_SAS, "sas");
+        properties.put(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "account");
+        String connectionString = UtilsV8.getConnectionStringFromProperties(properties);
+        assertEquals(connectionString,
+                String.format("AccountName=%s;SharedAccessSignature=%s", "account", "sas"));
+    }
+
+    @Test
+    public void testConnectionStringIsBasedOnAccessKeyIfSASMissing() {
+        Properties properties = new Properties();
+        properties.put(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "accessKey");
+        properties.put(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, "secretKey");
+
+        String connectionString = UtilsV8.getConnectionStringFromProperties(properties);
+        assertEquals(connectionString,
+                String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s", "accessKey", "secretKey"));
+    }
+
+    @Test
+    public void testConnectionStringSASIsPriority() {
+        Properties properties = new Properties();
+        properties.put(AzureConstants.AZURE_SAS, "sas");
+        properties.put(AzureConstants.AZURE_BLOB_ENDPOINT, "endpoint");
+
+        properties.put(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, "accessKey");
+        properties.put(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, "secretKey");
+
+        String connectionString = UtilsV8.getConnectionStringFromProperties(properties);
+        assertEquals(connectionString,
+                String.format("BlobEndpoint=%s;SharedAccessSignature=%s", "endpoint", "sas"));
+    }
+
+}
\ No newline at end of file
diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java
index 84bf6f2c82d..337a2f409a5 100644
--- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java
+++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java
@@ -28,6 +28,7 @@
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.UtilsV8;
 import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
 import org.apache.jackrabbit.oak.jcr.binary.fixtures.nodestore.FixtureUtils;
 import org.jetbrains.annotations.NotNull;
@@ -94,7 +95,7 @@ public DataStore createDataStore() {
         String connectionString = Utils.getConnectionStringFromProperties(azProps);
 
         try {
-            CloudBlobContainer container = Utils.getBlobContainer(connectionString, containerName);
+            CloudBlobContainer container = UtilsV8.getBlobContainer(connectionString, containerName);
             container.createIfNotExists();
 
             // create new properties since azProps is shared for all created DataStores
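The UtilsV8Test cases above encode a clear precedence: an explicit connection string wins, then a SAS token (combined with the blob endpoint if present, else the account name), then the shared account key. A hedged sketch restating that observable behaviour; UtilsV8.getConnectionStringFromProperties remains the source of truth:

import java.util.Properties;
import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;

final class ConnectionStringSketch {
    static String connectionString(Properties props) {
        String explicit = props.getProperty(AzureConstants.AZURE_CONNECTION_STRING);
        if (explicit != null && !explicit.isEmpty()) {
            return explicit; // highest priority: a fully spelled-out connection string
        }
        String sas = props.getProperty(AzureConstants.AZURE_SAS);
        if (sas != null && !sas.isEmpty()) {
            String endpoint = props.getProperty(AzureConstants.AZURE_BLOB_ENDPOINT);
            return endpoint != null && !endpoint.isEmpty()
                    ? String.format("BlobEndpoint=%s;SharedAccessSignature=%s", endpoint, sas)
                    : String.format("AccountName=%s;SharedAccessSignature=%s",
                            props.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME), sas);
        }
        // lowest priority: shared key authentication
        return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s",
                props.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME),
                props.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY));
    }
}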
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
index b58463ed3cb..1f2db842c70 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
@@ -32,6 +32,7 @@
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureBlobContainerProvider;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobContainerProviderV8;
 import org.apache.jackrabbit.oak.blob.cloud.s3.S3Constants;
 import org.apache.jackrabbit.oak.blob.cloud.s3.S3DataStore;
 import org.apache.jackrabbit.oak.blob.cloud.s3.Utils;
@@ -188,7 +189,7 @@ private static CloudBlobContainer getCloudBlobContainer(@NotNull Map
             return null;
         }
-        try (AzureBlobContainerProvider azureBlobContainerProvider = AzureBlobContainerProvider.Builder.builder(containerName)
+        try (AzureBlobContainerProviderV8 azureBlobContainerProvider = AzureBlobContainerProviderV8.Builder.builder(containerName)
                 .withAzureConnectionString(azureConnectionString)
                 .withAccountName(accountName)
                 .withClientId(clientId)
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/fixture/DataStoreUtilsTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/fixture/DataStoreUtilsTest.java
index 56e6d1f17fd..32200dc4ba7 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/fixture/DataStoreUtilsTest.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/fixture/DataStoreUtilsTest.java
@@ -29,6 +29,7 @@
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureBlobContainerProvider;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
 import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobContainerProviderV8;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assume;
@@ -151,7 +152,7 @@ public void delete_container_service_principal() throws Exception {
         Assume.assumeNotNull(tenantId);
 
         CloudBlobContainer container;
-        try (AzureBlobContainerProvider azureBlobContainerProvider = AzureBlobContainerProvider.Builder.builder(CONTAINER_NAME)
+        try (AzureBlobContainerProviderV8 azureBlobContainerProvider = AzureBlobContainerProviderV8.Builder.builder(CONTAINER_NAME)
                 .withAccountName(accountName)
                 .withClientId(clientId)
                 .withClientSecret(clientSecret)
diff --git a/oak-run-elastic/pom.xml b/oak-run-elastic/pom.xml
index cfb10b05b78..a732804475d 100644
--- a/oak-run-elastic/pom.xml
+++ b/oak-run-elastic/pom.xml
@@ -42,7 +42,7 @@
              105 MB: Azure updates
              107 MB: RDB/Tomcat (OAK-10752)
         -->
-        112197632
+        113453200
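The two oak-run-commons hunks above show the migration pattern for callers that still need the legacy CloudBlobContainer: they switch to the v8-suffixed provider instead of the rewritten v12 one. A hedged usage sketch; the with* builder methods shown here are taken from the hunks above, while withTenantId(), build(), and the getBlobContainer() return type are assumed from context:

import com.microsoft.azure.storage.blob.CloudBlobContainer;
import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobContainerProviderV8;

final class V8ProviderSketch {
    static void deleteContainer(String containerName, String azureConnectionString,
                                String accountName, String clientId, String clientSecret,
                                String tenantId) throws Exception {
        // The provider is Closeable; try-with-resources keeps any token-refresh
        // executors it owns from leaking.
        try (AzureBlobContainerProviderV8 provider = AzureBlobContainerProviderV8.Builder.builder(containerName)
                .withAzureConnectionString(azureConnectionString)
                .withAccountName(accountName)
                .withClientId(clientId)
                .withClientSecret(clientSecret)
                .withTenantId(tenantId)
                .build()) {
            CloudBlobContainer container = provider.getBlobContainer();
            container.deleteIfExists();
        }
    }
}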