diff --git a/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java b/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java index b2f2f075213..b1e36a8fd9e 100644 --- a/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java +++ b/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java @@ -53,7 +53,7 @@ private FixturesHelper() { } * default fixtures when no {@code nsfixtures} system property is provided */ public enum Fixture { - DOCUMENT_NS, @Deprecated SEGMENT_MK, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, SEGMENT_AWS, SEGMENT_AZURE, COMPOSITE_SEGMENT, COMPOSITE_MEM, COW_DOCUMENT + DOCUMENT_NS, @Deprecated SEGMENT_MK, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, SEGMENT_AWS, SEGMENT_AZURE_V8, SEGMENT_AZURE, COMPOSITE_SEGMENT, COMPOSITE_MEM, COW_DOCUMENT } private static final Set FIXTURES; diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java index 6afe976f6d8..b482254c7d1 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java @@ -33,6 +33,7 @@ import org.apache.jackrabbit.oak.composite.CompositeSegmentStoreFixture; import org.apache.jackrabbit.oak.segment.aws.fixture.SegmentAwsFixture; import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixture; +import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixtureV8; import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture; public class NodeStoreFixtures { @@ -43,6 +44,8 @@ public class NodeStoreFixtures { public static final NodeStoreFixture SEGMENT_AWS = new SegmentAwsFixture(); + public static final NodeStoreFixture SEGMENT_AZURE_V8 = new SegmentAzureFixtureV8(); + public static final NodeStoreFixture SEGMENT_AZURE = new SegmentAzureFixture(); public static final 
NodeStoreFixture DOCUMENT_NS = new DocumentMongoFixture(); @@ -80,6 +83,9 @@ public static Collection asJunitParameters(Set if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE)) { configuredFixtures.add(SEGMENT_AZURE); } + if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE_V8)) { + configuredFixtures.add(SEGMENT_AZURE_V8); + } if (fixtures.contains(FixturesHelper.Fixture.COMPOSITE_SEGMENT)) { configuredFixtures.add(COMPOSITE_SEGMENT); } diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java index 3ab54920160..4bef092e915 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java @@ -459,7 +459,7 @@ public void moveToDescendant() { if (fixture == NodeStoreFixtures.SEGMENT_TAR || fixture == NodeStoreFixtures.MEMORY_NS || fixture == NodeStoreFixtures.COMPOSITE_MEM || fixture == NodeStoreFixtures.COMPOSITE_SEGMENT || fixture == NodeStoreFixtures.COW_DOCUMENT || fixture == NodeStoreFixtures.SEGMENT_AWS - || fixture == NodeStoreFixtures.SEGMENT_AZURE) { + || fixture == NodeStoreFixtures.SEGMENT_AZURE_V8 || fixture == NodeStoreFixtures.SEGMENT_AZURE) { assertTrue(x.moveTo(x, "xx")); assertFalse(x.exists()); assertFalse(test.hasChildNode("x")); diff --git a/oak-parent/pom.xml b/oak-parent/pom.xml index b30d7c29d85..3b8a8ca6257 100644 --- a/oak-parent/pom.xml +++ b/oak-parent/pom.xml @@ -669,6 +669,11 @@ jackson-dataformat-smile ${jackson.version} + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + ${jackson.version} + org.apache.httpcomponents httpclient diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java index 062c1d8666a..9a539cd3cc0 100644 --- 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java @@ -44,7 +44,7 @@ import org.apache.jackrabbit.oak.segment.aws.AwsContext; import org.apache.jackrabbit.oak.segment.aws.AwsPersistence; import org.apache.jackrabbit.oak.segment.aws.Configuration; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -289,7 +289,7 @@ public Oak getOak(int clusterId) throws Exception { CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(azureContainerName); container.createIfNotExists(); CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath); - fileStoreBuilder.withCustomPersistence(new AzurePersistence(directory)); + fileStoreBuilder.withCustomPersistence(new AzurePersistenceV8(directory)); } BlobStore blobStore = null; @@ -341,7 +341,7 @@ public Oak[] setUpCluster(int n, StatisticsProvider statsProvider) throws Except container.createIfNotExists(); containers[i] = container; CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath + "/primary-" + i); - builder.withCustomPersistence(new AzurePersistence(directory)); + builder.withCustomPersistence(new AzurePersistenceV8(directory)); } if (blobStore != null) { diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java index d3720c0fc1b..0528230d01d 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java +++ 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java @@ -30,7 +30,7 @@ import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.commons.io.FileUtils; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -53,12 +53,12 @@ static NodeStore configureSegment(Options options, BlobStore blobStore, Whiteboa FileStoreBuilder builder; if (segmentStoreType == ToolUtils.SegmentStoreType.AZURE) { - final AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager(); + final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); SegmentNodeStorePersistence segmentNodeStorePersistence = - ToolUtils.newSegmentNodeStorePersistence(segmentStoreType, pathOrUri, azureStorageCredentialManager); + ToolUtils.newSegmentNodeStorePersistence(segmentStoreType, pathOrUri, azureStorageCredentialManagerV8); File tempDir = Files.createTempDirectory("azure-segment-store").toFile(); closer.register(() -> FileUtils.deleteQuietly(tempDir)); - closer.register(azureStorageCredentialManager); + closer.register(azureStorageCredentialManagerV8); builder = fileStoreBuilder(tempDir).withCustomPersistence(segmentNodeStorePersistence); } else { builder = fileStoreBuilder(new File(pathOrUri)).withMaxFileSize(256); diff --git a/oak-run-elastic/pom.xml b/oak-run-elastic/pom.xml index cfb10b05b78..348e6d54d7d 100644 --- a/oak-run-elastic/pom.xml +++ b/oak-run-elastic/pom.xml @@ -42,7 +42,7 @@ 105 MB: Azure updates 107 MB: RDB/Tomcat (OAK-10752) --> - 112197632 + 113039632 diff --git 
a/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java index 5f5b1580a00..c81082d24c2 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java @@ -19,7 +19,7 @@ package org.apache.jackrabbit.oak.explorer; import org.apache.jackrabbit.guava.common.io.Files; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; @@ -39,16 +39,16 @@ public class AzureSegmentStoreExplorerBackend extends AbstractSegmentTarExplorerBackend { private final String path; private SegmentNodeStorePersistence persistence; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public AzureSegmentStoreExplorerBackend(String path) { this.path = path; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @Override public void open() throws IOException { - this.persistence = newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManager); + this.persistence = newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); try { this.store = fileStoreBuilder(Files.createTempDir()) @@ -63,7 +63,7 @@ public void open() throws IOException { @Override public void close() { super.close(); - 
azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } @Override diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java index 0d2ee2c6a5f..e3c94654d75 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java @@ -28,7 +28,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.apache.jackrabbit.oak.run.commons.Command; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; @@ -87,8 +87,8 @@ public void execute(String... 
args) throws Exception { } } else { if (pathOrURI.startsWith("az:")) { - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { - SegmentNodeStorePersistence azurePersistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, pathOrURI, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + SegmentNodeStorePersistence azurePersistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, pathOrURI, azureStorageCredentialManagerV8); ReadOnlyFileStore store = fileStoreBuilder(Files.createTempDir()).withCustomPersistence(azurePersistence).withBlobStore(newBasicReadOnlyBlobStore()).buildReadOnly(); statusCode = Diff.builder() .withPath(pathOrURI) diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java index f0cc1d8d1fd..5e359ff6193 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java @@ -75,8 +75,8 @@ import org.apache.jackrabbit.oak.run.cli.BlobStoreOptions.Type; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; @@ -1132,7 +1132,7 @@ class SegmentStoreFixture implements StoreFixture { class 
AzureSegmentStoreFixture extends SegmentStoreFixture { private static final String AZURE_DIR = "repository"; private String container; - private final AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager(); + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); @Override public NodeStore init(DataStoreBlobStore blobStore, File storeFile) throws Exception { Properties props = AzureDataStoreUtils.getAzureConfig(); @@ -1142,14 +1142,14 @@ class AzureSegmentStoreFixture extends SegmentStoreFixture { container = container + System.currentTimeMillis(); // Create the azure segment container String connectionString = getAzureConnectionString(accessKey, secretKey, container, AZURE_DIR); - AzureUtilities.cloudBlobDirectoryFrom(connectionString, container, AZURE_DIR); + AzureUtilitiesV8.cloudBlobDirectoryFrom(connectionString, container, AZURE_DIR); // get the azure uri expected by the command storePath = getAzureUri(accessKey, container, AZURE_DIR); // initialize azure segment for test setup SegmentNodeStorePersistence segmentNodeStorePersistence = - ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, storePath, azureStorageCredentialManager); + ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, storePath, azureStorageCredentialManagerV8); fileStore = fileStoreBuilder(storeFile).withBlobStore(blobStore) .withCustomPersistence(segmentNodeStorePersistence).build(); @@ -1181,7 +1181,7 @@ protected String getAzureConnectionString(String accountName, String secret, Str public void after() { try { AzureDataStoreUtils.deleteContainer(container); - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } catch(Exception e) { log.error("Error in cleaning the container {}", container, e); } diff --git a/oak-segment-azure/pom.xml b/oak-segment-azure/pom.xml index 2d7a386d134..17196b7c21a 100644 --- 
a/oak-segment-azure/pom.xml +++ b/oak-segment-azure/pom.xml @@ -48,10 +48,12 @@ org.apache.jackrabbit.oak.segment.remote*, com.fasterxml.jackson.annotation;resolution:=optional, com.fasterxml.jackson.databind*;resolution:=optional, - com.fasterxml.jackson.dataformat.xml;resolution:=optional, + com.fasterxml.jackson.dataformat.*;resolution:=optional, com.fasterxml.jackson.datatype*;resolution:=optional, com.azure.identity.broker.implementation;resolution:=optional, - com.azure.xml;resolution:=optional, + com.azure.storage.blob*;resolution:=optional, + com.azure.storage.common*;resolution:=optional, + com.azure.storage.internal*;resolution:=optional, com.microsoft.aad.msal4jextensions*;resolution:=optional, com.nimbusds.langtag;resolution:=optional, com.sun.jna*;resolution:=optional, @@ -61,6 +63,7 @@ io.netty.channel.epoll;resolution:=optional, io.netty.handler.codec.*;resolution:=optional, org.objectweb.asm;resolution:=optional, + org.codehaus.stax2;resolution:=optional, !org.apache.jackrabbit.oak.segment*, !com.google.*, !android.os, @@ -71,11 +74,16 @@ org.apache.jackrabbit.oak.segment.azure, org.apache.jackrabbit.oak.segment.azure.queue, org.apache.jackrabbit.oak.segment.azure.util, + com.fasterxml.jackson.dataformat.xml, + com.fasterxml.jackson.dataformat.xml.deser, com.microsoft.azure.storage, com.microsoft.azure.storage.core, com.microsoft.azure.storage.blob, com.azure.core.credential, - com.azure.identity + com.azure.identity, + com.azure.blob, + com.azure.storage.common, + com.azure.storage.internal.avro.implementation azure-storage, @@ -83,6 +91,11 @@ azure-core, azure-identity, azure-json, + azure-xml, + azure-storage-blob, + azure-storage-common, + azure-storage-internal-avro, + jackson-dataformat-xml, guava, jsr305, reactive-streams, @@ -96,7 +109,8 @@ json-smart, content-type, accessors-smart, - nimbus-jose-jwt + nimbus-jose-jwt, + stax2-api <_plugin /> @@ -187,6 +201,33 @@ com.microsoft.azure azure-keyvault-core + + org.codehaus.woodstox + stax2-api + 
4.2.2 + + + + + com.azure + azure-storage-blob + 12.25.3 + + + com.azure + azure-storage-common + 12.24.3 + + + com.azure + azure-storage-internal-avro + 12.10.3 + + + com.azure + azure-xml + 1.0.0 + @@ -365,6 +406,12 @@ junit test + + io.projectreactor + reactor-test + 3.6.10 + test + org.testcontainers testcontainers diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java index be710fab3a9..a2d6c4e9084 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java @@ -16,17 +16,19 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.CopyStatus; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import com.azure.core.util.polling.PollResponse; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobCopyInfo; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.CopyStatusType; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import 
org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; import org.jetbrains.annotations.NotNull; @@ -34,38 +36,43 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.net.URISyntaxException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.UUID; import java.util.Set; +import java.time.Duration; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; +import static com.azure.storage.blob.models.BlobType.BLOCK_BLOB; import static org.apache.jackrabbit.oak.commons.conditions.Validate.checkArgument; import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.getName; public class AzureArchiveManager implements SegmentArchiveManager { - private static final Logger log = LoggerFactory.getLogger(AzureSegmentArchiveReader.class); + private static final Logger log = LoggerFactory.getLogger(AzureArchiveManager.class); + + protected final BlobContainerClient readBlobContainerClient; - protected final CloudBlobDirectory cloudBlobDirectory; + protected final BlobContainerClient writeBlobContainerClient; + + protected final String rootPrefix; protected final IOMonitor ioMonitor; protected final FileStoreMonitor monitor; private WriteAccessController writeAccessController; - public AzureArchiveManager(CloudBlobDirectory segmentstoreDirectory, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { - this.cloudBlobDirectory = segmentstoreDirectory; + public AzureArchiveManager(BlobContainerClient 
readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { + this.readBlobContainerClient = readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; + this.rootPrefix = rootPrefix; this.ioMonitor = ioMonitor; this.monitor = fileStoreMonitor; this.writeAccessController = writeAccessController; @@ -74,13 +81,10 @@ public AzureArchiveManager(CloudBlobDirectory segmentstoreDirectory, IOMonitor i @Override public List listArchives() throws IOException { try { - List archiveNames = StreamSupport.stream(cloudBlobDirectory - .listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null) - .spliterator(), false) - .filter(i -> i instanceof CloudBlobDirectory) - .map(i -> (CloudBlobDirectory) i) - .filter(i -> getName(i).endsWith(".tar")) - .map(CloudBlobDirectory::getPrefix) + List archiveNames = readBlobContainerClient.listBlobsByHierarchy(rootPrefix + "/").stream() + .filter(BlobItem::isPrefix) + .filter(blobItem -> blobItem.getName().endsWith(".tar") || blobItem.getName().endsWith(".tar/")) + .map(BlobItem::getName) .map(Paths::get) .map(Path::getFileName) .map(Path::toString) @@ -95,7 +99,7 @@ public List listArchives() throws IOException { } } return archiveNames; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -105,44 +109,46 @@ public List listArchives() throws IOException { * @param archiveName * @return true if the archive is empty (no 0000.* segment) */ - private boolean isArchiveEmpty(String archiveName) throws IOException, URISyntaxException, StorageException { - return !getDirectory(archiveName).listBlobs("0000.").iterator().hasNext(); + private boolean isArchiveEmpty(String archiveName) throws BlobStorageException { + String fullBlobPrefix = String.format("%s/%s", getDirectory(archiveName), "0000."); + 
ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(fullBlobPrefix); + return !readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().hasNext(); } @Override public SegmentArchiveReader open(String archiveName) throws IOException { try { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - if (!archiveDirectory.getBlockBlobReference("closed").exists()) { + String closedBlob = String.format("%s/%s", getDirectory(archiveName), "closed"); + if (!readBlobContainerClient.getBlobClient(closedBlob).exists()) { return null; } - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor); - } catch (StorageException | URISyntaxException e) { + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor); + } catch (BlobStorageException e) { throw new IOException(e); } } @Override public SegmentArchiveReader forceOpen(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor); + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor); } @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriter(getDirectory(archiveName), ioMonitor, monitor, writeAccessController); + return new AzureSegmentArchiveWriter(writeBlobContainerClient, rootPrefix, archiveName, ioMonitor, monitor, writeAccessController); } @Override public boolean delete(String archiveName) { try { getBlobs(archiveName) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { writeAccessController.checkWritingAllowed(); - cloudBlob.delete(); - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + log.error("Can't delete segment {}", 
blobItem.getName(), e); } }); return true; @@ -155,14 +161,14 @@ public boolean delete(String archiveName) { @Override public boolean renameTo(String from, String to) { try { - CloudBlobDirectory targetDirectory = getDirectory(to); + String targetDirectory = getDirectory(to); getBlobs(from) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { writeAccessController.checkWritingAllowed(); - renameBlob(cloudBlob, targetDirectory); + renameBlob(blobItem, targetDirectory); } catch (IOException e) { - log.error("Can't rename segment {}", cloudBlob.getUri().getPath(), e); + log.error("Can't rename segment {}", blobItem.getName(), e); } }); return true; @@ -174,13 +180,13 @@ public boolean renameTo(String from, String to) { @Override public void copyFile(String from, String to) throws IOException { - CloudBlobDirectory targetDirectory = getDirectory(to); + String targetDirectory = getDirectory(to); getBlobs(from) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { - copyBlob(cloudBlob, targetDirectory); + copyBlob(blobItem, targetDirectory); } catch (IOException e) { - log.error("Can't copy segment {}", cloudBlob.getUri().getPath(), e); + log.error("Can't copy segment {}", blobItem.getName(), e); } }); } @@ -188,8 +194,10 @@ public void copyFile(String from, String to) throws IOException { @Override public boolean exists(String archiveName) { try { - return getDirectory(archiveName).listBlobsSegmented(null, false, null, 1, null, null, null).getLength() > 0; - } catch (IOException | StorageException | URISyntaxException e) { + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(getDirectory(archiveName)); + return readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().hasNext(); + } catch (BlobStorageException e) { log.error("Can't check the existence of {}", archiveName, e); return false; } @@ -200,7 +208,7 @@ public void recoverEntries(String archiveName, LinkedHashMap entri Pattern pattern = 
Pattern.compile(RemoteUtilities.SEGMENT_FILE_NAME_PATTERN); List entryList = new ArrayList<>(); - for (CloudBlob b : getBlobs(archiveName)) { + for (BlobItem b : getBlobs(archiveName)) { String name = getName(b); Matcher m = pattern.matcher(name); if (!m.matches()) { @@ -208,12 +216,12 @@ public void recoverEntries(String archiveName, LinkedHashMap entri } int position = Integer.parseInt(m.group(1), 16); UUID uuid = UUID.fromString(m.group(2)); - long length = b.getProperties().getLength(); + long length = b.getProperties().getContentLength(); if (length > 0) { - byte[] data = new byte[(int) length]; + byte[] data; try { - b.downloadToByteArray(data, 0); - } catch (StorageException e) { + data = readBlobContainerClient.getBlobClient(b.getName()).downloadContent().toBytes(); + } catch (BlobStorageException e) { throw new IOException(e); } entryList.add(new RecoveredEntry(position, uuid, data, name)); @@ -234,13 +242,13 @@ public void recoverEntries(String archiveName, LinkedHashMap entri } private void delete(String archiveName, Set recoveredEntries) throws IOException { - getBlobs(archiveName) - .forEach(cloudBlob -> { - if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(cloudBlob)))) { + getBlobs(archiveName + "/") + .forEach(blobItem -> { + if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(blobItem)))) { try { - cloudBlob.delete(); - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + log.error("Can't delete segment {}", blobItem.getName(), e); } } }); @@ -257,51 +265,42 @@ public void backup(@NotNull String archiveName, @NotNull String backupArchiveNam delete(archiveName, recoveredEntries); } - protected CloudBlobDirectory getDirectory(String archiveName) throws IOException { - try { - return cloudBlobDirectory.getDirectoryReference(archiveName); - } catch 
(URISyntaxException e) { - throw new IOException(e); - } + protected String getDirectory(String archiveName) { + return String.format("%s/%s", rootPrefix, archiveName); } - private List getBlobs(String archiveName) throws IOException { - return AzureUtilities.getBlobs(getDirectory(archiveName)); + private List getBlobs(String archiveName) throws IOException { + String archivePath = getDirectory(archiveName); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(archivePath); + + return AzureUtilities.getBlobs(readBlobContainerClient, listBlobsOptions); } - private void renameBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { + private void renameBlob(BlobItem blob, String newParent) throws IOException { copyBlob(blob, newParent); try { - blob.delete(); - } catch (StorageException e) { + writeBlobContainerClient.getBlobClient(blob.getName()).delete(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private void copyBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { - checkArgument(blob instanceof CloudBlockBlob, "Only page blobs are supported for the rename"); - try { - String blobName = getName(blob); - CloudBlockBlob newBlob = newParent.getBlockBlobReference(blobName); - newBlob.startCopy(blob.getUri()); - - boolean isStatusPending = true; - while (isStatusPending) { - newBlob.downloadAttributes(); - if (newBlob.getCopyState().getStatus() == CopyStatus.PENDING) { - Thread.sleep(100); - } else { - isStatusPending = false; - } - } + private void copyBlob(BlobItem blob, String newParent) throws IOException { + checkArgument(blob.getProperties().getBlobType() == BLOCK_BLOB, "Only page blobs are supported for the rename"); - CopyStatus finalStatus = newBlob.getCopyState().getStatus(); - if (newBlob.getCopyState().getStatus() != CopyStatus.SUCCESS) { - throw new IOException("Invalid copy status for " + blob.getUri().getPath() + ": " + finalStatus); - } - } catch 
(StorageException | InterruptedException | URISyntaxException e) { - throw new IOException(e); + BlockBlobClient sourceBlobClient = readBlobContainerClient.getBlobClient(blob.getName()).getBlockBlobClient(); + + String destinationBlob = String.format("%s/%s", newParent, AzureUtilities.getName(blob)); + BlockBlobClient destinationBlobClient = writeBlobContainerClient.getBlobClient(destinationBlob).getBlockBlobClient(); + + PollResponse response = destinationBlobClient.beginCopy(sourceBlobClient.getBlobUrl(), Duration.ofMillis(100)).waitForCompletion(); + + String finalStatus = response.getValue().getCopyStatus().toString(); + if (response.getValue().getCopyStatus() != CopyStatusType.SUCCESS) { + throw new IOException("Invalid copy status for " + blob.getName() + ": " + finalStatus); } + } private static class RecoveredEntry implements Comparable { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java index c33094750b7..6f0094b59d1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java @@ -16,34 +16,32 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; import org.apache.commons.io.IOUtils; import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; public class AzureGCJournalFile implements GCJournalFile { - private final 
CloudAppendBlob gcJournal; + private final AppendBlobClient gcJournal; - public AzureGCJournalFile(CloudAppendBlob gcJournal) { + public AzureGCJournalFile(AppendBlobClient gcJournal) { this.gcJournal = gcJournal; } @Override public void writeLine(String line) throws IOException { try { - if (!gcJournal.exists()) { - gcJournal.createOrReplace(); - } - gcJournal.appendText(line + "\n", StandardCharsets.UTF_8.name(), null, null, null); - } catch (StorageException e) { + String appendLine = line + "\n"; + gcJournal.createIfNotExists(); + byte[] appendBytes = appendLine.getBytes(java.nio.charset.StandardCharsets.UTF_8); gcJournal.appendBlock(new ByteArrayInputStream(appendBytes), appendBytes.length); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -54,10 +52,9 @@ public List readLines() throws IOException { if (!gcJournal.exists()) { return Collections.emptyList(); } - byte[] data = new byte[(int) gcJournal.getProperties().getLength()]; - gcJournal.downloadToByteArray(data, 0); + byte[] data = gcJournal.downloadContent().toBytes(); return IOUtils.readLines(new ByteArrayInputStream(data), Charset.defaultCharset()); - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -65,10 +62,8 @@ public List readLines() throws IOException { @Override public void truncate() throws IOException { try { - if (gcJournal.exists()) { - gcJournal.delete(); - } - } catch (StorageException e) { + gcJournal.deleteIfExists(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java new file mode 100644 index 00000000000..0c59c5d3439 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import org.apache.jackrabbit.guava.common.base.Stopwatch; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +public class AzureHttpRequestLoggingPolicy implements HttpPipelinePolicy { + + private static final Logger log = LoggerFactory.getLogger(AzureHttpRequestLoggingPolicy.class); + + private final boolean verboseEnabled = Boolean.getBoolean("segment.azure.v12.http.verbose.enabled"); + + private RemoteStoreMonitor remoteStoreMonitor; + + public void setRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { + log.info("Enable Azure Remote store Monitor"); + this.remoteStoreMonitor = remoteStoreMonitor; + } + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + Stopwatch stopwatch = Stopwatch.createStarted(); + + return next.process().flatMap(httpResponse -> { + if 
(remoteStoreMonitor != null) { + remoteStoreMonitor.requestDuration(stopwatch.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS); + if (httpResponse.getStatusCode() > 201) { + remoteStoreMonitor.requestError(); + } else { + remoteStoreMonitor.requestCount(); + } + } + + if (verboseEnabled) { + log.info("HTTP Request: {} {} {} {}ms", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl(), httpResponse.getStatusCode(), (stopwatch.elapsed(TimeUnit.NANOSECONDS))/1_000_000); + } + + return Mono.just(httpResponse); + }); + } + +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java index 18da8df6ef1..df2817da2aa 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java @@ -16,16 +16,14 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import org.apache.jackrabbit.guava.common.collect.ImmutableList; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.ListBlobItem; -import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; -import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.BlobType; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.AppendBlobClient; import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; -import 
org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import org.apache.jackrabbit.guava.common.collect.ImmutableList; import org.apache.jackrabbit.oak.segment.azure.util.CaseInsensitiveKeysMapAccess; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; @@ -34,15 +32,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.ByteArrayInputStream; import java.io.IOException; -import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.HashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; public class AzureJournalFile implements JournalFile { @@ -50,7 +49,9 @@ public class AzureJournalFile implements JournalFile { private static final int JOURNAL_LINE_LIMIT = Integer.getInteger("org.apache.jackrabbit.oak.segment.azure.journal.lines", 40_000); - private final CloudBlobDirectory directory; + private final BlobContainerClient readBlobContainerClient; + + private final BlobContainerClient writeBlobContainerClient; private final String journalNamePrefix; @@ -58,20 +59,21 @@ public class AzureJournalFile implements JournalFile { private final WriteAccessController writeAccessController; - AzureJournalFile(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { - this.directory = directory; + AzureJournalFile(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { + this.readBlobContainerClient = readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; this.journalNamePrefix = journalNamePrefix; this.lineLimit = lineLimit; this.writeAccessController = 
writeAccessController; } - public AzureJournalFile(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController) { - this(directory, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); + public AzureJournalFile(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String journalNamePrefix, WriteAccessController writeAccessController) { + this(readBlobContainerClient, writeBlobContainerClient, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); } @Override public JournalFileReader openJournalReader() throws IOException { - return new CombinedReader(getJournalBlobs()); + return new CombinedReader(readBlobContainerClient, getJournalBlobs()); } @Override @@ -98,26 +100,25 @@ private String getJournalFileName(int index) { return String.format("%s.%03d", journalNamePrefix, index); } - private List getJournalBlobs() throws IOException { + private List getJournalBlobs() throws IOException { try { - List result = new ArrayList<>(); - for (ListBlobItem b : directory.listBlobs(journalNamePrefix)) { - if (b instanceof CloudAppendBlob) { - result.add((CloudAppendBlob) b); - } else { - log.warn("Invalid blob type: {} {}", b.getUri(), b.getClass()); - } - } - result.sort(Comparator.comparing(AzureUtilities::getName).reversed()); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(journalNamePrefix); + List result = readBlobContainerClient.listBlobs(listBlobsOptions, null).stream() + .filter(blobItem -> blobItem.getProperties().getBlobType().equals(BlobType.APPEND_BLOB)) + .collect(Collectors.toList()); + result.sort(Comparator.comparing(AzureUtilities::getName).reversed()); return result; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } private static class AzureJournalReader implements JournalFileReader { - private final CloudBlob blob; + private final BlobContainerClient 
blobContainerClient; + + private final BlobItem blob; private ReverseFileReader reader; @@ -125,7 +126,8 @@ private static class AzureJournalReader implements JournalFileReader { private boolean firstLineReturned; - private AzureJournalReader(CloudBlob blob) { + private AzureJournalReader(BlobContainerClient blobContainerClient, BlobItem blob) { + this.blobContainerClient = blobContainerClient; this.blob = blob; } @@ -134,19 +136,18 @@ public String readLine() throws IOException { if (reader == null) { try { if (!metadataFetched) { - blob.downloadAttributes(); - metadataFetched = true; Map metadata = CaseInsensitiveKeysMapAccess.convert(blob.getMetadata()); + metadataFetched = true; if (metadata.containsKey("lastEntry")) { firstLineReturned = true; return metadata.get("lastEntry"); } } - reader = new ReverseFileReader(blob); + reader = new ReverseFileReader(blobContainerClient, blob); if (firstLineReturned) { - while("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it + while ("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it } - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -160,33 +161,24 @@ public void close() throws IOException { private class AzureJournalWriter implements JournalFileWriter { - private CloudAppendBlob currentBlob; + private AppendBlobClient currentBlob; private int lineCount; - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - public AzureJournalWriter() throws IOException { - writeOptimisedBlobRequestOptions = AzureRequestOptions.optimiseForWriteOperations(directory.getServiceClient().getDefaultRequestOptions()); - - List blobs = getJournalBlobs(); + List blobs = getJournalBlobs(); if (blobs.isEmpty()) { try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(1)); - currentBlob.createOrReplace(); - currentBlob.downloadAttributes(); - } catch (URISyntaxException | 
StorageException e) { + currentBlob = writeBlobContainerClient.getBlobClient(getJournalFileName(1)).getAppendBlobClient(); + currentBlob.createIfNotExists(); + } catch (BlobStorageException e) { throw new IOException(e); } } else { - currentBlob = blobs.get(0); - } - try { - currentBlob.downloadAttributes(); - } catch (StorageException e) { - throw new IOException(e); + currentBlob = writeBlobContainerClient.getBlobClient(blobs.get(0).getName()).getAppendBlobClient(); } - String lc = currentBlob.getMetadata().get("lineCount"); + + String lc = currentBlob.getProperties().getMetadata().get("lineCount"); lineCount = lc == null ? 0 : Integer.parseInt(lc); } @@ -195,12 +187,12 @@ public void truncate() throws IOException { try { writeAccessController.checkWritingAllowed(); - for (CloudAppendBlob cloudAppendBlob : getJournalBlobs()) { - cloudAppendBlob.delete(DeleteSnapshotsOption.NONE, null, writeOptimisedBlobRequestOptions, null); + for (BlobItem blobItem : getJournalBlobs()) { + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); } - createNextFile(0); - } catch (StorageException e) { + createNextFile(0); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -221,9 +212,9 @@ public void batchWriteLines(List lines) throws IOException { List firstBlock = lines.subList(0, firstBlockSize); List> remainingBlocks = CollectionUtils.partitionList(lines.subList(firstBlockSize, lines.size()), lineLimit); List> allBlocks = ImmutableList.>builder() - .addAll(firstBlock.isEmpty() ?
ImmutableList.of() : ImmutableList.of(firstBlock)) + .addAll(remainingBlocks) + .build(); for (List entries : allBlocks) { if (lineCount >= lineLimit) { @@ -235,12 +226,15 @@ public void batchWriteLines(List lines) throws IOException { text.append(line).append("\n"); } try { - currentBlob.appendText(text.toString(), null, null, writeOptimisedBlobRequestOptions, null); - currentBlob.getMetadata().put("lastEntry", entries.get(entries.size() - 1)); + byte[] textBytes = text.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8); currentBlob.appendBlock(new ByteArrayInputStream(textBytes), textBytes.length); + Map metadata = new HashMap<>(currentBlob.getProperties().getMetadata()); + metadata.put("lastEntry", entries.get(entries.size() - 1)); + lineCount += entries.size(); - currentBlob.getMetadata().put("lineCount", Integer.toString(lineCount)); - currentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + + metadata.put("lineCount", Integer.toString(lineCount)); + currentBlob.setMetadata(metadata); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -248,17 +242,17 @@ public void batchWriteLines(List lines) throws IOException { private void createNextFile(int suffix) throws IOException { try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(suffix + 1)); - currentBlob.createOrReplace(null, writeOptimisedBlobRequestOptions, null); + currentBlob = writeBlobContainerClient.getBlobClient(getJournalFileName(suffix + 1)).getAppendBlobClient(); + currentBlob.createIfNotExists(); lineCount = 0; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } private int parseCurrentSuffix() { String name = AzureUtilities.getName(currentBlob); - Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)" ); + Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)"); Matcher matcher = pattern.matcher(name); int parsedSuffix; if (matcher.find()) { @@ -288,8
+282,8 @@ private static class CombinedReader implements JournalFileReader { private JournalFileReader currentReader; - private CombinedReader(List blobs) { - readers = blobs.stream().map(AzureJournalReader::new).iterator(); + private CombinedReader(BlobContainerClient blobContainerClient, List blobs) { + readers = blobs.stream().map(blobItem -> new AzureJournalReader(blobContainerClient, blobItem)).iterator(); } @Override diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java index aae72c12003..35c2accd890 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java @@ -16,8 +16,9 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,9 +32,9 @@ public class AzureManifestFile implements ManifestFile { private static final Logger log = LoggerFactory.getLogger(AzureManifestFile.class); - private final CloudBlockBlob manifestBlob; + private final BlockBlobClient manifestBlob; - public AzureManifestFile(CloudBlockBlob manifestBlob) { + public AzureManifestFile(BlockBlobClient manifestBlob) { this.manifestBlob = manifestBlob; } @@ -41,7 +42,7 @@ public AzureManifestFile(CloudBlockBlob manifestBlob) { public boolean exists() { try { return manifestBlob.exists(); - } catch (StorageException e) { + } catch (BlobStorageException e) { log.error("Can't check if the manifest exists", 
e); return false; } @@ -51,14 +52,12 @@ public boolean exists() { public Properties load() throws IOException { Properties properties = new Properties(); if (exists()) { - long length = manifestBlob.getProperties().getLength(); - byte[] data = new byte[(int) length]; try { - manifestBlob.downloadToByteArray(data, 0); - } catch (StorageException e) { + byte[] data = manifestBlob.downloadContent().toBytes(); + properties.load(new ByteArrayInputStream(data)); + } catch (BlobStorageException e) { throw new IOException(e); } - properties.load(new ByteArrayInputStream(data)); } return properties; } @@ -70,8 +69,8 @@ public void save(Properties properties) throws IOException { byte[] data = bos.toByteArray(); try { - manifestBlob.uploadFromByteArray(data, 0, data.length); - } catch (StorageException e) { + manifestBlob.upload(BinaryData.fromBytes(data), true); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java index dc41fbff1e4..a0f0ddded5a 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java @@ -16,23 +16,13 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import java.io.IOException; -import java.net.URISyntaxException; -import java.nio.file.Paths; -import java.util.Date; -import java.util.EnumSet; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RequestCompletedEvent; -import com.microsoft.azure.storage.StorageEvent; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import 
com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; @@ -46,39 +36,48 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; + public class AzurePersistence implements SegmentNodeStorePersistence { private static final Logger log = LoggerFactory.getLogger(AzurePersistence.class); - protected final CloudBlobDirectory segmentstoreDirectory; + protected final BlobContainerClient readBlobContainerClient; + + protected final BlobContainerClient writeBlobContainerClient; + + protected final BlobContainerClient noRetryBlobContainerClient; + + protected final String rootPrefix; + + protected AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy; protected WriteAccessController writeAccessController = new WriteAccessController(); - public AzurePersistence(CloudBlobDirectory segmentStoreDirectory) { - this.segmentstoreDirectory = segmentStoreDirectory; + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix) { + this(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefix, null); + } - 
AzureRequestOptions.applyDefaultRequestOptions(segmentStoreDirectory.getServiceClient().getDefaultRequestOptions()); + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { + this.readBlobContainerClient = readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; + this.noRetryBlobContainerClient = noRetryBlobContainerClient; + this.azureHttpRequestLoggingPolicy = azureHttpRequestLoggingPolicy; + this.rootPrefix = rootPrefix; } @Override public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { attachRemoteStoreMonitor(remoteStoreMonitor); - return new AzureArchiveManager(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController); + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix, ioMonitor, fileStoreMonitor, writeAccessController); } @Override public boolean segmentFilesExist() { try { - for (ListBlobItem i : segmentstoreDirectory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)) { - if (i instanceof CloudBlobDirectory) { - CloudBlobDirectory dir = (CloudBlobDirectory) i; - String name = Paths.get(dir.getPrefix()).getFileName().toString(); - if (name.endsWith(".tar")) { - return true; - } - } - } - return false; - } catch (StorageException | URISyntaxException e) { + return readBlobContainerClient.listBlobsByHierarchy(rootPrefix + "/").stream() + .filter(BlobItem::isPrefix) + .anyMatch(blobItem -> blobItem.getName().endsWith(".tar") || blobItem.getName().endsWith(".tar/")); + } catch (BlobStorageException e) { log.error("Can't check if the segment archives exists", e); return false; } @@ -86,7 +85,7 @@ public boolean segmentFilesExist() { 
@Override public JournalFile getJournalFile() { - return new AzureJournalFile(segmentstoreDirectory, "journal.log", writeAccessController); + return new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, rootPrefix + "/journal.log", writeAccessController); } @Override @@ -101,55 +100,45 @@ public ManifestFile getManifestFile() throws IOException { @Override public RepositoryLock lockRepository() throws IOException { - return new AzureRepositoryLock(getBlockBlob("repo.lock"), () -> { + BlockBlobClient blockBlobClient = getBlockBlob("repo.lock"); + BlockBlobClient noRetryBlockBlobClient = getNoRetryBlockBlob("repo.lock"); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); + return new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> { log.warn("Lost connection to the Azure. The client will be closed."); // TODO close the connection }, writeAccessController).lock(); } - private CloudBlockBlob getBlockBlob(String path) throws IOException { + private BlockBlobClient getBlockBlob(String path) throws IOException { try { - return segmentstoreDirectory.getBlockBlobReference(path); - } catch (URISyntaxException | StorageException e) { + return readBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private CloudAppendBlob getAppendBlob(String path) throws IOException { + private BlockBlobClient getNoRetryBlockBlob(String path) throws IOException { try { - return segmentstoreDirectory.getAppendBlobReference(path); - } catch (URISyntaxException | StorageException e) { + return noRetryBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private static void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { - OperationContext.getGlobalRequestCompletedEventHandler().addListener(new 
StorageEvent() { - - @Override - public void eventOccurred(RequestCompletedEvent e) { - Date startDate = e.getRequestResult().getStartDate(); - Date stopDate = e.getRequestResult().getStopDate(); - - if (startDate != null && stopDate != null) { - long requestDuration = stopDate.getTime() - startDate.getTime(); - remoteStoreMonitor.requestDuration(requestDuration, TimeUnit.MILLISECONDS); - } - - Exception exception = e.getRequestResult().getException(); - - if (exception == null) { - remoteStoreMonitor.requestCount(); - } else { - remoteStoreMonitor.requestError(); - } - } + private AppendBlobClient getAppendBlob(String path) throws IOException { + try { + return readBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getAppendBlobClient(); + } catch (BlobStorageException e) { + throw new IOException(e); + } + } - }); + private void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { + if (azureHttpRequestLoggingPolicy != null) {azureHttpRequestLoggingPolicy.setRemoteStoreMonitor(remoteStoreMonitor);} } - public CloudBlobDirectory getSegmentstoreDirectory() { - return segmentstoreDirectory; + public BlobContainerClient getReadBlobContainerClient() { + return readBlobContainerClient; } public void setWriteAccessController(WriteAccessController writeAccessController) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java new file mode 100644 index 00000000000..12fee4c31c3 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.identity.ClientSecretCredential; +import com.azure.identity.ClientSecretCredentialBuilder; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.common.policy.RequestRetryOptions; +import org.apache.commons.lang3.StringUtils; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import org.apache.jackrabbit.oak.segment.azure.util.Environment; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.*; + +public class AzurePersistenceManager { + + private static final Logger log = LoggerFactory.getLogger(AzurePersistenceManager.class); + + private AzurePersistenceManager() { + } + + public static AzurePersistence createAzurePersistenceFrom(@NotNull String accountName, @NotNull String containerName, @NotNull String rootPrefix, @NotNull Environment environment) throws IOException { + final String clientId = environment.getVariable(AZURE_CLIENT_ID); + final String clientSecret = environment.getVariable(AZURE_CLIENT_SECRET); + final String tenantId = 
environment.getVariable(AZURE_TENANT_ID); + + if (StringUtils.isNoneBlank(clientId, clientSecret, tenantId)) { + try { + return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefix, clientId, clientSecret, tenantId, false, false); + } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { + log.error("Error occurred while connecting to Azure Storage using service principals: ", e); + throw new IllegalArgumentException( + "Could not connect to the Azure Storage. Please verify if AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables are correctly set!"); + } + } + + log.warn("AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables empty or missing. Switching to authentication with AZURE_SECRET_KEY."); + + String key = environment.getVariable(AZURE_SECRET_KEY); + try { + return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefix, false, false); + } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { + log.error("Error occurred while connecting to Azure Storage using secret key: ", e); + throw new IllegalArgumentException( + "Could not connect to the Azure Storage. 
Please verify if AZURE_SECRET_KEY environment variable is correctly set!"); + } + } + + public static AzurePersistence createAzurePersistenceFrom(Configuration configuration) throws IOException { + if (!StringUtils.isBlank(configuration.connectionURL())) { + return createPersistenceFromConnectionURL(configuration); + } + if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) { + return createPersistenceFromServicePrincipalCredentials(configuration); + } + if (!StringUtils.isBlank(configuration.sharedAccessSignature())) { + return createPersistenceFromSasUri(configuration); + } + return createPersistenceFromAccessKey(configuration); + } + + private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { + return createPersistenceFromAccessKey(configuration.accountName(), configuration.containerName(), configuration.accessKey(), configuration.blobEndpoint(), configuration.rootPath(), configuration.enableSecondaryLocation(), true); + } + + private static AzurePersistence createPersistenceFromAccessKey(String accountName, String containerName, String accessKey, String blobEndpoint, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + StringBuilder connectionString = new StringBuilder(); + connectionString.append("DefaultEndpointsProtocol=https;"); + connectionString.append("AccountName=").append(accountName).append(';'); + connectionString.append("AccountKey=").append(accessKey).append(';'); + if (!StringUtils.isBlank(blobEndpoint)) { + connectionString.append("BlobEndpoint=").append(blobEndpoint).append(';'); + } + return createAzurePersistence(connectionString.toString(), accountName, containerName, rootPrefix, enableSecondaryLocation, createContainer); + } + + @NotNull + private static AzurePersistence createPersistenceFromConnectionURL(Configuration configuration) throws IOException { + return 
createAzurePersistence(configuration.connectionURL(), configuration, true); + } + + private static AzurePersistence createPersistenceFromSasUri(Configuration configuration) throws IOException { + StringBuilder connectionString = new StringBuilder(); + connectionString.append("DefaultEndpointsProtocol=https;"); + connectionString.append("AccountName=").append(configuration.accountName()).append(';'); + connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); + if (!StringUtils.isBlank(configuration.blobEndpoint())) { + connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); + } + return createAzurePersistence(connectionString.toString(), configuration, false); + } + + + @NotNull + private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { + return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), configuration.rootPath(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); + } + + private static AzurePersistence createPersistenceFromServicePrincipalCredentials(String accountName, String containerName, String rootPrefix, String clientId, String clientSecret, String tenantId, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy = new AzureHttpRequestLoggingPolicy(); + + ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() + .clientId(clientId) + .clientSecret(clientSecret) + .tenantId(tenantId) + .build(); + + RequestRetryOptions retryOptions = readRequestRetryOptions(enableSecondaryLocation, accountName); + BlobContainerClient blobContainerClient = getBlobContainerClient(accountName, containerName, retryOptions, azureHttpRequestLoggingPolicy, 
clientSecretCredential); + + RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); + BlobContainerClient writeContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, azureHttpRequestLoggingPolicy, clientSecretCredential); + + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(accountName, containerName, null, azureHttpRequestLoggingPolicy, clientSecretCredential); + + if (createContainer) { + blobContainerClient.createIfNotExists(); + } + + final String rootPrefixNormalized = normalizePath(rootPrefix); + + return new AzurePersistence(blobContainerClient, writeContainerClient, noRetryBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); + } + + @NotNull + private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { + return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), configuration.rootPath(), configuration.enableSecondaryLocation(), createContainer); + } + + @NotNull + private static AzurePersistence createAzurePersistence(String connectionString, String accountName, String containerName, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + try { + AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy = new AzureHttpRequestLoggingPolicy(); + + RequestRetryOptions retryOptions = readRequestRetryOptions(enableSecondaryLocation, accountName); + BlobContainerClient blobContainerClient = getBlobContainerClient(accountName, containerName, retryOptions, azureHttpRequestLoggingPolicy, connectionString); + + RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); + BlobContainerClient writeBlobContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, 
azureHttpRequestLoggingPolicy, connectionString); + + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(accountName, containerName, null, azureHttpRequestLoggingPolicy, connectionString); + + if (createContainer) { + blobContainerClient.createIfNotExists(); + } + + final String rootPrefixNormalized = normalizePath(rootPrefix); + + return new AzurePersistence(blobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); + } catch (BlobStorageException e) { + throw new IOException(e); + } + } + + private static BlobContainerClient getBlobContainerClient(String accountName, String containerName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy, String connectionString) { + BlobServiceClient blobServiceClient = blobServiceClientBuilder(accountName, requestRetryOptions, azureHttpRequestLoggingPolicy) + .connectionString(connectionString) + .buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + + private static BlobContainerClient getBlobContainerClient(String accountName, String containerName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy, ClientSecretCredential clientSecretCredential) { + BlobServiceClient blobServiceClient = blobServiceClientBuilder(accountName, requestRetryOptions, azureHttpRequestLoggingPolicy) + .credential(clientSecretCredential) + .buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + + private static BlobServiceClientBuilder blobServiceClientBuilder(String accountName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { + String endpoint = String.format("https://%s.blob.core.windows.net", accountName); + + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(endpoint) + 
.addPolicy(azureHttpRequestLoggingPolicy); + + if (requestRetryOptions != null) { + builder.retryOptions(requestRetryOptions); + } + + return builder; + } + + private static RequestRetryOptions readRequestRetryOptions(boolean enableSecondaryLocation, String accountName) { + RequestRetryOptions retryOptions = AzureRequestOptions.getRetryOptionsDefault(); + if (enableSecondaryLocation) { + String endpointSecondaryRegion = String.format("https://%s-secondary.blob.core.windows.net", accountName); + retryOptions = AzureRequestOptions.getRetryOptionsDefault(endpointSecondaryRegion); + } + return retryOptions; + } + + @NotNull + private static String normalizePath(@NotNull String rootPath) { + if (!rootPath.isEmpty() && rootPath.charAt(0) == '/') { + return rootPath.substring(1); + } + return rootPath; + } + +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java index 6c50f9e121c..e9a04518c56 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java @@ -16,20 +16,19 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.AccessCondition; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.RetryNoRetry; -import com.microsoft.azure.storage.StorageErrorCode; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.azure.core.http.RequestConditions; +import com.azure.core.util.Context; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobStorageException; +import 
com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.time.Duration; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -53,7 +52,9 @@ public class AzureRepositoryLock implements RepositoryLock { private final Runnable shutdownHook; - private final CloudBlockBlob blob; + private final BlockBlobClient blockBlobClient; + + private final BlobLeaseClient leaseClient; private final Thread refresherThread; @@ -67,13 +68,14 @@ public class AzureRepositoryLock implements RepositoryLock { private static final String REFRESHER_THREAD_NAME = "AzureRepositoryLock-Refresher"; private boolean inError; - public AzureRepositoryLock(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController) { - this(blob, shutdownHook, writeAccessController, TIMEOUT_SEC); + public AzureRepositoryLock(BlockBlobClient blockBlobClient, BlobLeaseClient leaseClient, Runnable shutdownHook, WriteAccessController writeAccessController) { + this(blockBlobClient, leaseClient, shutdownHook, writeAccessController, TIMEOUT_SEC); } - public AzureRepositoryLock(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { + public AzureRepositoryLock(BlockBlobClient blockBlobClient, BlobLeaseClient leaseClient, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { this.shutdownHook = shutdownHook; - this.blob = blob; + this.blockBlobClient = blockBlobClient; + this.leaseClient = leaseClient; this.refresherThread = new Thread(this::refreshLease, REFRESHER_THREAD_NAME); this.refresherThread.setDaemon(true); this.timeoutSec = timeoutSec; @@ -90,13 +92,13 @@ 
public AzureRepositoryLock lock() throws IOException { Exception ex = null; do { try { - blob.openOutputStream().close(); + blockBlobClient.getBlobOutputStream().close(); log.info("{} = {}", LEASE_DURATION_PROP, leaseDuration); log.info("{} = {}", RENEWAL_INTERVAL_PROP, renewalInterval); log.info("{} = {}", TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, timeToWaitBeforeWriteBlock); - leaseId = blob.acquireLease(leaseDuration, null); + leaseId = leaseClient.acquireLease(leaseDuration); writeAccessController.enableWriting(); log.info("Acquired lease {}", leaseId); } catch (Exception e) { @@ -130,48 +132,39 @@ private void refreshLease() { long lastUpdate = 0; setInError(false); while (doUpdate) { + long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; try { - long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; - try { - if (timeSinceLastUpdate > renewalInterval) { - - BlobRequestOptions requestOptions = new BlobRequestOptions(); - requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); - requestOptions.setRetryPolicyFactory(new RetryNoRetry()); - blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); - - writeAccessController.enableWriting(); - if (isInError()) { - log.info("Lease renewal successful again."); - setInError(false); - } - lastUpdate = System.currentTimeMillis(); - } - } catch (Exception e) { - timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; + if (timeSinceLastUpdate > renewalInterval) { + leaseId = leaseClient.renewLeaseWithResponse((RequestConditions) null, Duration.ofMillis(LEASE_RENEWAL_TIMEOUT_MS), Context.NONE).getValue(); - if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { - writeAccessController.disableWriting(); + writeAccessController.enableWriting(); + if (isInError()) { + log.info("Lease renewal successful again."); + setInError(false); } + lastUpdate = System.currentTimeMillis(); + } + } catch (Exception e) { + 
timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; - if (e instanceof StorageException) { - StorageException storageException = (StorageException) e; - if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT - , StorageErrorCode.SERVICE_INTERNAL_ERROR - , StorageErrorCodeStrings.SERVER_BUSY - , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) { - log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); - } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { - log.warn("Client side error. Retry in progress ...", e); - } else { - log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e); - } + if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { + writeAccessController.disableWriting(); + } + + if (e instanceof BlobStorageException) { + BlobStorageException storageException = (BlobStorageException) e; + if (Set.of(BlobErrorCode.OPERATION_TIMED_OUT, + BlobErrorCode.SERVER_BUSY, + BlobErrorCode.INTERNAL_ERROR).contains(storageException.getErrorCode())) { + log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); } else { - log.error("Can't renew the lease", e); - shutdownHook.run(); - doUpdate = false; - return; + log.warn("Could not renew lease due to storage exception. Retry in progress ... 
", e); } + } else { + log.error("Can't renew the lease", e); + shutdownHook.run(); + doUpdate = false; + return; } waitABit(100); } catch (Throwable t) { @@ -199,11 +192,11 @@ public void unlock() throws IOException { private void releaseLease() throws IOException { try { - blob.releaseLease(AccessCondition.generateLeaseCondition(leaseId)); - blob.delete(); + leaseClient.releaseLease(); + blockBlobClient.delete(); log.info("Released lease {}", leaseId); leaseId = null; - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java index e6c636e9345..b5566f2f8c1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java @@ -16,33 +16,38 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.BlockBlobClient; +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; import java.io.File; import java.io.IOException; -import java.net.URISyntaxException; import java.util.Map; import java.util.UUID; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; 
-import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; public class AzureSegmentArchiveReader extends AbstractRemoteSegmentArchiveReader { - private final CloudBlobDirectory archiveDirectory; + private final BlobContainerClient blobContainerClient; private final long length; - AzureSegmentArchiveReader(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor) throws IOException { + private final String archiveName; + + private final String archivePath; + + AzureSegmentArchiveReader(BlobContainerClient blobContainerClient, String rootPrefix, String archiveName, IOMonitor ioMonitor) throws IOException { super(ioMonitor); - this.archiveDirectory = archiveDirectory; + this.blobContainerClient = blobContainerClient; + this.archiveName = archiveName; + this.archivePath = String.format("%s/%s", rootPrefix, archiveName); this.length = computeArchiveIndexAndLength(); } @@ -53,19 +58,21 @@ public long length() { @Override public String getName() { - return AzureUtilities.getName(archiveDirectory); + return archiveName; } @Override protected long computeArchiveIndexAndLength() throws IOException { long length = 0; - for (CloudBlob blob : AzureUtilities.getBlobs(archiveDirectory)) { + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(archivePath + "/"); + for (BlobItem blob : AzureUtilities.getBlobs(blobContainerClient, listBlobsOptions)) { Map metadata = blob.getMetadata(); if (AzureBlobMetadata.isSegment(metadata)) { - RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, (int) 
blob.getProperties().getLength()); + RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, blob.getProperties().getContentLength().intValue()); index.put(new UUID(indexEntry.getMsb(), indexEntry.getLsb()), indexEntry); } - length += blob.getProperties().getLength(); + length += blob.getProperties().getContentLength(); } return length; @@ -73,7 +80,7 @@ protected long computeArchiveIndexAndLength() throws IOException { @Override protected void doReadSegmentToBuffer(String segmentFileName, Buffer buffer) throws IOException { - readBufferFully(getBlob(segmentFileName), buffer); + readBufferFully(getBlobClient(segmentFileName), buffer); } @Override @@ -83,28 +90,29 @@ protected Buffer doReadDataFile(String extension) throws IOException { @Override protected File archivePathAsFile() { - return new File(archiveDirectory.getUri().getPath()); + return new File(archivePath); } - private CloudBlockBlob getBlob(String name) throws IOException { + private BlockBlobClient getBlobClient(String name) throws IOException { try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { + String fullName = String.format("%s/%s", archivePath, name); + return blobContainerClient.getBlobClient(fullName).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } private Buffer readBlob(String name) throws IOException { try { - CloudBlockBlob blob = getBlob(name); + BlockBlobClient blob = getBlobClient(name); if (!blob.exists()) { return null; } - long length = blob.getProperties().getLength(); + long length = blob.getProperties().getBlobSize(); Buffer buffer = Buffer.allocate((int) length); AzureUtilities.readBufferFully(blob, buffer); return buffer; - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java 
b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java index e9bbbb322a9..e9b9f5d8a7a 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java @@ -16,51 +16,52 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.guava.common.base.Stopwatch; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveWriter; import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import java.io.File; +import 
java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; + public class AzureSegmentArchiveWriter extends AbstractRemoteSegmentArchiveWriter { - private final CloudBlobDirectory archiveDirectory; + private final BlobContainerClient blobContainerClient; + + private final String rootPrefix; + + private final String archiveName; private final Retrier retrier = Retrier.withParams( Integer.getInteger("azure.segment.archive.writer.retries.max", 16), Integer.getInteger("azure.segment.archive.writer.retries.intervalMs", 5000) ); - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - - public AzureSegmentArchiveWriter(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { + public AzureSegmentArchiveWriter(BlobContainerClient blobContainerClient, String rootPrefix, String archiveName, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { super(ioMonitor, monitor); - this.archiveDirectory = archiveDirectory; + this.blobContainerClient = blobContainerClient; + this.rootPrefix = rootPrefix; + this.archiveName = archiveName; this.writeAccessController = writeAccessController; - this.writeOptimisedBlobRequestOptions = AzureRequestOptions.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions()); } @Override public String getName() { - return AzureUtilities.getName(archiveDirectory); + return archiveName; } @Override @@ -71,17 +72,16 @@ protected void doWriteArchiveEntry(RemoteSegmentArchiveEntry indexEntry, byte[] long msb = indexEntry.getMsb(); long lsb = indexEntry.getLsb(); String segmentName = getSegmentFileName(indexEntry); - 
CloudBlockBlob blob = getBlob(segmentName); - ioMonitor.beforeSegmentWrite(new File(blob.getName()), msb, lsb, size); + BlockBlobClient blob = getBlockBlobClient(segmentName); + ioMonitor.beforeSegmentWrite(new File(blob.getBlobName()), msb, lsb, size); Stopwatch stopwatch = Stopwatch.createStarted(); try { + blob.upload(BinaryData.fromBytes(Arrays.copyOfRange(data, offset, offset + size)), true); blob.setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry)); - blob.uploadFromByteArray(data, offset, size, null, writeOptimisedBlobRequestOptions, null); - blob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } - ioMonitor.afterSegmentWrite(new File(blob.getName()), msb, lsb, size, stopwatch.elapsed(TimeUnit.NANOSECONDS)); + ioMonitor.afterSegmentWrite(new File(blob.getBlobName()), msb, lsb, size, stopwatch.elapsed(TimeUnit.NANOSECONDS)); } @Override @@ -92,7 +92,7 @@ protected Buffer doReadArchiveEntry(RemoteSegmentArchiveEntry indexEntry) throw } else { buffer = Buffer.allocate(indexEntry.getLength()); } - readBufferFully(getBlob(getSegmentFileName(indexEntry)), buffer); + readBufferFully(getBlockBlobClient(getSegmentFileName(indexEntry)), buffer); return buffer; } @@ -102,8 +102,8 @@ protected void doWriteDataFile(byte[] data, String extension) throws IOException try { writeAccessController.checkWritingAllowed(); - getBlob(getName() + extension).uploadFromByteArray(data, 0, data.length, null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + getBlockBlobClient(getName() + extension).upload(BinaryData.fromBytes(data), true); + } catch (BlobStorageException e) { throw new IOException(e); } }); @@ -115,8 +115,8 @@ protected void afterQueueClosed() throws IOException { try { writeAccessController.checkWritingAllowed(); - getBlob("closed").uploadFromByteArray(new byte[0], 0, 0, null, writeOptimisedBlobRequestOptions, null); - } catch 
(StorageException e) { + getBlockBlobClient("closed").upload(BinaryData.fromBytes(new byte[0]), true); + } catch (BlobStorageException e) { throw new IOException(e); } }); @@ -127,10 +127,11 @@ protected void afterQueueFlushed() { // do nothing } - private CloudBlockBlob getBlob(String name) throws IOException { + private BlockBlobClient getBlockBlobClient(String name) throws IOException { + String blobFullName = String.format("%s/%s/%s", rootPrefix, archiveName, name); try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { + return blobContainerClient.getBlobClient(blobFullName).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java index 24465ea720c..5d33fa25798 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java @@ -18,16 +18,9 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.commons.lang3.StringUtils; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureSegmentStoreV8; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.jetbrains.annotations.NotNull; 
import org.osgi.framework.ServiceRegistration; import org.osgi.service.component.ComponentContext; import org.osgi.service.component.annotations.Activate; @@ -38,16 +31,14 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.Hashtable; import java.util.Objects; import static org.osgi.framework.Constants.SERVICE_PID; @Component( - configurationPolicy = ConfigurationPolicy.REQUIRE, - configurationPid = {Configuration.PID}) + configurationPolicy = ConfigurationPolicy.REQUIRE, + configurationPid = {Configuration.PID}) public class AzureSegmentStoreService { private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreService.class); @@ -57,21 +48,35 @@ public class AzureSegmentStoreService { public static final String DEFAULT_ROOT_PATH = "/oak"; public static final boolean DEFAULT_ENABLE_SECONDARY_LOCATION = false; - public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; private ServiceRegistration registration; - private static AzureStorageCredentialManager azureStorageCredentialManager; + + private final boolean useAzureSdkV12 = Boolean.getBoolean("segment.azure.v12.enabled"); + @Activate public void activate(ComponentContext context, Configuration config) throws IOException { - AzurePersistence persistence = createAzurePersistenceFrom(config); - registration = context.getBundleContext() - .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ - put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); - if (!Objects.equals(config.role(), "")) { - put("role", config.role()); - } - }}); + if (useAzureSdkV12) { + log.info("Starting node store using Azure SDK 12"); + AzurePersistence persistence = AzurePersistenceManager.createAzurePersistenceFrom(config); + registration = context.getBundleContext() + 
.registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ + put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); + if (!Objects.equals(config.role(), "")) { + put("role", config.role()); + } + }}); + } else { + log.info("Starting node store using Azure SDK 8"); + AzurePersistenceV8 persistence = AzureSegmentStoreV8.createAzurePersistenceFrom(config); + registration = context.getBundleContext() + .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ + put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistenceV8.class.getName(), config.accountName(), config.rootPath())); + if (!Objects.equals(config.role(), "")) { + put("role", config.role()); + } + }}); + } } @Deactivate @@ -80,99 +85,6 @@ public void deactivate() throws IOException { registration.unregister(); registration = null; } - if (azureStorageCredentialManager != null) { - azureStorageCredentialManager.close(); - } - } - - private static AzurePersistence createAzurePersistenceFrom(Configuration configuration) throws IOException { - if (!StringUtils.isBlank(configuration.connectionURL())) { - return createPersistenceFromConnectionURL(configuration); - } - if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) { - return createPersistenceFromServicePrincipalCredentials(configuration); - } - if (!StringUtils.isBlank(configuration.sharedAccessSignature())) { - return createPersistenceFromSasUri(configuration); - } - return createPersistenceFromAccessKey(configuration); - } - - private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - 
connectionString.append("AccountKey=").append(configuration.accessKey()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - return createAzurePersistence(connectionString.toString(), configuration, true); - } - - private static AzurePersistence createPersistenceFromSasUri(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - return createAzurePersistence(connectionString.toString(), configuration, false); - } - - @NotNull - private static AzurePersistence createPersistenceFromConnectionURL(Configuration configuration) throws IOException { - return createAzurePersistence(configuration.connectionURL(), configuration, true); - } - - @NotNull - private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { - azureStorageCredentialManager = new AzureStorageCredentialManager(); - StorageCredentials storageCredentialsToken = azureStorageCredentialManager.getStorageCredentialAccessTokenFromServicePrincipals(configuration.accountName(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId()); - - try { - CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, configuration.accountName()); - return createAzurePersistence(cloud, configuration, true); - } catch (StorageException | URISyntaxException e) { - throw new IOException(e); - } - } - - 
@NotNull - private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { - try { - CloudStorageAccount cloud = CloudStorageAccount.parse(connectionString); - log.info("Connection string: '{}'", cloud); - return createAzurePersistence(cloud, configuration, createContainer); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { - throw new IOException(e); - } - } - - @NotNull - private static AzurePersistence createAzurePersistence(CloudStorageAccount cloud, Configuration configuration, boolean createContainer) throws URISyntaxException, StorageException { - CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient(); - BlobRequestOptions blobRequestOptions = new BlobRequestOptions(); - - if (configuration.enableSecondaryLocation()) { - blobRequestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); - } - cloudBlobClient.setDefaultRequestOptions(blobRequestOptions); - - CloudBlobContainer container = cloudBlobClient.getContainerReference(configuration.containerName()); - if (createContainer && !container.exists()) { - container.create(); - } - String path = normalizePath(configuration.rootPath()); - return new AzurePersistence(container.getDirectoryReference(path)); - } - - @NotNull - private static String normalizePath(@NotNull String rootPath) { - if (rootPath.length() > 0 && rootPath.charAt(0) == '/') { - return rootPath.substring(1); - } - return rootPath; } } \ No newline at end of file diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java index 5c740fac1d2..f6743edafc5 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java @@ -16,18 +16,13 @@ */ 
package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.ResultContinuation; -import com.microsoft.azure.storage.ResultSegment; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.StorageUri; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.LeaseStatus; -import com.microsoft.azure.storage.blob.ListBlobItem; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobListDetails; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.AppendBlobClient; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; import org.jetbrains.annotations.NotNull; @@ -37,13 +32,9 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; -import java.net.URI; -import java.net.URISyntaxException; import java.nio.file.Paths; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.EnumSet; import java.util.List; +import java.util.stream.Collectors; public final class AzureUtilities { @@ -58,119 +49,44 @@ public final class AzureUtilities { private AzureUtilities() { } - public static String getName(CloudBlob blob) { + public static String getName(BlobItem blob) { return Paths.get(blob.getName()).getFileName().toString(); } - public static String getName(CloudBlobDirectory directory) { - return 
Paths.get(directory.getUri().getPath()).getFileName().toString(); + public static String getName(AppendBlobClient blob) { + return Paths.get(blob.getBlobName()).getFileName().toString(); } - public static List getBlobs(CloudBlobDirectory directory) throws IOException { - List blobList = new ArrayList<>(); - ResultContinuation token = null; - do { - ResultSegment result = listBlobsInSegments(directory, token); //get the blobs in pages of 5000 - for (ListBlobItem b : result.getResults()) { //add resultant blobs to list - if (b instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) b; - blobList.add(cloudBlob); - } - } - token = result.getContinuationToken(); - } while (token != null); - return blobList; + + + public static List getBlobs(BlobContainerClient blobContainerClient, ListBlobsOptions listOptions) { + listOptions.setDetails(new BlobListDetails().setRetrieveMetadata(true)); + return blobContainerClient.listBlobs(listOptions, null).stream().collect(Collectors.toList()); } - public static void readBufferFully(CloudBlob blob, Buffer buffer) throws IOException { + public static void readBufferFully(BlockBlobClient blob, Buffer buffer) throws IOException { try { - blob.download(new ByteBufferOutputStream(buffer)); + blob.downloadStream(new ByteBufferOutputStream(buffer)); buffer.flip(); - } catch (StorageException e) { - if (e.getHttpStatusCode() == 404) { - log.error("Blob not found in the remote repository: {}", blob.getName()); - throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getName()); + } catch (BlobStorageException e) { + if (e.getStatusCode() == 404) { + log.error("Blob not found in the remote repository: {}", blob.getBlobName()); + throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getBlobName()); } throw new RepositoryNotReachableException(e); } } - public static void deleteAllEntries(CloudBlobDirectory directory) throws IOException { - getBlobs(directory).forEach(b -> { + public 
static void deleteAllEntries(BlobContainerClient blobContainerClient, ListBlobsOptions listBlobsOptions) { + getBlobs(blobContainerClient, listBlobsOptions).forEach(b -> { try { - b.deleteIfExists(); - } catch (StorageException e) { - log.error("Can't delete blob {}", b.getUri().getPath(), e); + blobContainerClient.getBlobClient(b.getName()).deleteIfExists(); + } catch (BlobStorageException e) { + log.error("Can't delete blob {}", b.getName(), e); } }); } - public static CloudBlobDirectory cloudBlobDirectoryFrom(StorageCredentials credentials, - String uri, String dir) throws URISyntaxException, StorageException { - StorageUri storageUri = new StorageUri(new URI(uri)); - CloudBlobContainer container = new CloudBlobContainer(storageUri, credentials); - - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - public static CloudBlobDirectory cloudBlobDirectoryFrom(String connection, String containerName, - String dir) throws InvalidKeyException, URISyntaxException, StorageException { - CloudStorageAccount cloud = CloudStorageAccount.parse(connection); - CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(containerName); - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - private static ResultSegment listBlobsInSegments(CloudBlobDirectory directory, - ResultContinuation token) throws IOException { - ResultSegment result = null; - IOException lastException = null; - for (int sleep = 10; sleep <= 10000; sleep *= 10) { //increment the sleep time in steps. 
- try { - result = directory.listBlobsSegmented( - null, - false, - EnumSet.of(BlobListingDetails.METADATA), - 5000, - token, - null, - null); - break; //we have the results, no need to retry - } catch (StorageException | URISyntaxException e) { - lastException = new IOException(e); - try { - Thread.sleep(sleep); //Sleep and retry - } catch (InterruptedException ex) { - log.warn("Interrupted", e); - } - } - } - - if (result == null) { - throw lastException; - } else { - return result; - } - } - - public static void deleteAllBlobs(@NotNull CloudBlobDirectory directory) throws URISyntaxException, StorageException, InterruptedException { - for (ListBlobItem blobItem : directory.listBlobs()) { - if (blobItem instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) blobItem; - if (cloudBlob.getProperties().getLeaseStatus() == LeaseStatus.LOCKED) { - cloudBlob.breakLease(0); - } - cloudBlob.deleteIfExists(); - } else if (blobItem instanceof CloudBlobDirectory) { - CloudBlobDirectory cloudBlobDirectory = (CloudBlobDirectory) blobItem; - deleteAllBlobs(cloudBlobDirectory); - } - } - } - private static class ByteBufferOutputStream extends OutputStream { @NotNull diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java index f0aa1e4b1c2..b1bd54f98a7 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java @@ -27,7 +27,7 @@ pid = {PID}, name = "Apache Jackrabbit Oak Azure Segment Store Service", description = "Azure backend for the Oak Segment Node Store") -@interface Configuration { +public @interface Configuration { String PID = "org.apache.jackrabbit.oak.segment.azure.AzureSegmentStoreService"; diff --git 
a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java index 9b564b63e9f..89c0f8f27d1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java @@ -16,15 +16,17 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobRange; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import static java.lang.Math.min; @@ -35,7 +37,9 @@ public class ReverseFileReader { private int bufferSize; - private final CloudBlob blob; + private final BlobContainerClient blobContainerClient; + + private final BlobItem blob; private byte[] buffer; @@ -43,14 +47,15 @@ public class ReverseFileReader { private int fileOffset; - public ReverseFileReader(CloudBlob blob) throws StorageException { - this (blob, BUFFER_SIZE); + public ReverseFileReader(BlobContainerClient blobContainerClient, BlobItem blob) throws BlobStorageException { + this(blobContainerClient, blob, BUFFER_SIZE); } - public ReverseFileReader(CloudBlob blob, int bufferSize) throws StorageException { + public ReverseFileReader(BlobContainerClient blobContainerClient, BlobItem blob, int bufferSize) throws BlobStorageException { + this.blobContainerClient = blobContainerClient; 
this.blob = blob; - if (blob.exists()) { - this.fileOffset = (int) blob.getProperties().getLength(); + if (blobContainerClient.getBlobClient(blob.getName()).exists()) { + this.fileOffset = blob.getProperties().getContentLength().intValue(); } else { this.fileOffset = 0; } @@ -67,12 +72,11 @@ private void readBlock() throws IOException { if (buffer.length > 0) { fileOffset -= buffer.length; try { - OperationContext opContext = new OperationContext(); - HashMap userHeaders = new HashMap<>(); - userHeaders.put("If-Match", "*"); - opContext.setUserHeaders(userHeaders); - blob.downloadRangeToByteArray(fileOffset, Long.valueOf(buffer.length), buffer, 0, null, null, opContext); - } catch (StorageException e) { + BlobRange blobRange = new BlobRange(Long.valueOf(fileOffset), Long.valueOf(buffer.length)); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(buffer.length); + blobContainerClient.getBlobClient(blob.getName()).downloadStreamWithResponse(outputStream, blobRange, null, new BlobRequestConditions().setIfMatch("*"), false, null, null); + buffer = outputStream.toByteArray(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java index 954129d77c7..5fc321f6f9d 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ @Internal(since = "1.0.0") -@Version("3.0.0") +@Version("4.0.0") package org.apache.jackrabbit.oak.segment.azure; import org.apache.jackrabbit.oak.commons.annotations.Internal; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java index 7501a5f7e9c..98c153bc263 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java @@ -18,8 +18,8 @@ import com.google.common.io.Files; import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.JournalReader; import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; @@ -348,7 +348,7 @@ public void afterSegmentRead(File file, long msb, long lsb, int length, long ela private final Integer persistentCacheSizeGb; private final CloudBlobDirectory cloudBlobDirectory; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; private AzureCheck(Builder builder) { this.path = builder.path; @@ -367,7 +367,7 @@ private AzureCheck(Builder builder) { this.persistentCachePath = builder.persistentCachePath; this.persistentCacheSizeGb = builder.persistentCacheSizeGb; this.cloudBlobDirectory = builder.cloudBlobDirectory; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new 
AzureStorageCredentialManagerV8(); } private static Integer revisionsToCheckCount(Integer revisionsCount) { @@ -379,9 +379,9 @@ public int run() { SegmentNodeStorePersistence persistence; if (cloudBlobDirectory != null) { - persistence = new AzurePersistence(cloudBlobDirectory); + persistence = new AzurePersistenceV8(cloudBlobDirectory); } else { - persistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManager); + persistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); } if (persistentCachePath != null) { @@ -428,7 +428,7 @@ public int run() { e.printStackTrace(err); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java index 364f774b7da..21ee265414c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java @@ -36,8 +36,8 @@ import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.jackrabbit.oak.segment.SegmentCache; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.GCType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; @@ -307,7 +307,7 @@ public AzureCompact build() { private final 
CloudBlobDirectory sourceCloudBlobDirectory; private final CloudBlobDirectory destinationCloudBlobDirectory; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; private AzureCompact(Builder builder) { this.path = builder.path; @@ -324,7 +324,7 @@ private AzureCompact(Builder builder) { this.garbageThresholdPercentage = builder.garbageThresholdPercentage; this.sourceCloudBlobDirectory = builder.sourceCloudBlobDirectory; this.destinationCloudBlobDirectory = builder.destinationCloudBlobDirectory; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } public int run() throws IOException, StorageException, URISyntaxException { @@ -333,11 +333,11 @@ public int run() throws IOException, StorageException, URISyntaxException { SegmentNodeStorePersistence roPersistence; SegmentNodeStorePersistence rwPersistence; if (sourceCloudBlobDirectory != null && destinationCloudBlobDirectory != null) { - roPersistence = new AzurePersistence(sourceCloudBlobDirectory); - rwPersistence = new AzurePersistence(destinationCloudBlobDirectory); + roPersistence = new AzurePersistenceV8(sourceCloudBlobDirectory); + rwPersistence = new AzurePersistenceV8(destinationCloudBlobDirectory); } else { - roPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, path, azureStorageCredentialManager); - rwPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, targetPath, azureStorageCredentialManager); + roPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); + rwPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, targetPath, azureStorageCredentialManagerV8); } if (persistentCachePath != null) { @@ -363,7 +363,7 @@ public int run() throws IOException, StorageException, URISyntaxException { 
CloudBlobContainer targetContainer = null; if (targetPath != null) { - CloudBlobDirectory targetDirectory = createCloudBlobDirectory(targetPath.substring(3), azureStorageCredentialManager); + CloudBlobDirectory targetDirectory = createCloudBlobDirectory(targetPath.substring(3), azureStorageCredentialManagerV8); targetContainer = targetDirectory.getContainer(); } else { targetContainer = destinationCloudBlobDirectory.getContainer(); @@ -424,7 +424,7 @@ public int run() throws IOException, StorageException, URISyntaxException { persistGCJournal(rwPersistence, newSize, gcGeneration, root); // close azure storage credential manager - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); return 0; } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java index d14d95084b5..706f64885ae 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java @@ -43,7 +43,7 @@ import java.util.concurrent.Future; import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.SegmentStoreMigrator.Segment; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; @@ -260,7 +260,7 @@ public SegmentCopy build() { private SegmentNodeStorePersistence destPersistence; private ExecutorService executor = Executors.newFixedThreadPool(READ_THREADS + 1); - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 
azureStorageCredentialManagerV8; public SegmentCopy(Builder builder) { this.source = builder.source; @@ -273,7 +273,7 @@ public SegmentCopy(Builder builder) { this.maxSizeGb = builder.maxSizeGb; this.outWriter = builder.outWriter; this.errWriter = builder.errWriter; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } public int run() { @@ -288,7 +288,7 @@ public int run() { if (flat && destType == SegmentStoreType.TAR) { try { if (srcPersistence == null) { - srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManager); + srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManagerV8); } SegmentArchiveManager sourceManager = srcPersistence.createArchiveManager(false, false, @@ -367,13 +367,13 @@ public int run() { e.printStackTrace(errWriter); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } else { try { if (srcPersistence == null || destPersistence == null) { - srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManager); - destPersistence = newSegmentNodeStorePersistence(destType, destination, azureStorageCredentialManager); + srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManagerV8); + destPersistence = newSegmentNodeStorePersistence(destType, destination, azureStorageCredentialManagerV8); } printMessage(outWriter, "Started segment-copy transfer!"); @@ -398,7 +398,7 @@ public int run() { e.printStackTrace(errWriter); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java index 
43677b62efb..6bba400c16c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java @@ -23,7 +23,7 @@ import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; @@ -290,7 +290,7 @@ public Builder withSource(File dir) { } public Builder withSource(CloudBlobDirectory dir) throws URISyntaxException, StorageException { - this.source = new AzurePersistence(dir); + this.source = new AzurePersistenceV8(dir); this.sourceName = storeDescription(SegmentStoreType.AZURE, dir.getContainer().getName() + "/" + dir.getPrefix()); return this; } @@ -314,7 +314,7 @@ public Builder withTarget(File dir) { } public Builder withTarget(CloudBlobDirectory dir) throws URISyntaxException, StorageException { - this.target = new AzurePersistence(dir); + this.target = new AzurePersistenceV8(dir); this.targetName = storeDescription(SegmentStoreType.AZURE, dir.getContainer().getName() + "/" + dir.getPrefix()); return this; } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java index cbf206b1cd8..c9f0321d7d2 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java @@ -39,9 +39,9 @@ import org.apache.jackrabbit.oak.commons.Buffer; import 
org.apache.jackrabbit.oak.commons.collections.CollectionUtils; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; import org.apache.jackrabbit.oak.segment.file.*; @@ -131,14 +131,14 @@ public static SegmentNodeStorePersistence decorateWithCache(SegmentNodeStorePers public static SegmentNodeStorePersistence newSegmentNodeStorePersistence(SegmentStoreType storeType, String pathOrUri, - @Nullable AzureStorageCredentialManager azureStorageCredentialManager) { + @Nullable AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { SegmentNodeStorePersistence persistence = null; switch (storeType) { case AZURE: - Objects.requireNonNull(azureStorageCredentialManager, "azure storage credentials manager instance cannot be null"); - CloudBlobDirectory cloudBlobDirectory = createCloudBlobDirectory(pathOrUri.substring(3), azureStorageCredentialManager); - persistence = new AzurePersistence(cloudBlobDirectory); + Objects.requireNonNull(azureStorageCredentialManagerV8, "azure storage credentials manager instance cannot be null"); + CloudBlobDirectory cloudBlobDirectory = createCloudBlobDirectory(pathOrUri.substring(3), azureStorageCredentialManagerV8); + persistence = new AzurePersistenceV8(cloudBlobDirectory); break; default: persistence = new TarPersistence(new File(pathOrUri)); @@ -160,13 +160,13 @@ public static SegmentArchiveManager createArchiveManager(SegmentNodeStorePersist return archiveManager; } - public static CloudBlobDirectory 
createCloudBlobDirectory(String path, AzureStorageCredentialManager azureStorageCredentialManager) { - return createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); + public static CloudBlobDirectory createCloudBlobDirectory(String path, AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { + return createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); } public static CloudBlobDirectory createCloudBlobDirectory(String path, Environment environment, - AzureStorageCredentialManager azureStorageCredentialManager) { + AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { Map config = parseAzureConfigurationFromUri(path); String accountName = config.get(KEY_ACCOUNT_NAME); @@ -175,14 +175,14 @@ public static CloudBlobDirectory createCloudBlobDirectory(String path, if (config.containsKey(KEY_SHARED_ACCESS_SIGNATURE)) { credentials = new StorageCredentialsSharedAccessSignature(config.get(KEY_SHARED_ACCESS_SIGNATURE)); } else { - credentials = azureStorageCredentialManager.getStorageCredentialsFromEnvironment(accountName, environment); + credentials = azureStorageCredentialManagerV8.getStorageCredentialsFromEnvironment(accountName, environment); } String uri = config.get(KEY_STORAGE_URI); String dir = config.get(KEY_DIR); try { - return AzureUtilities.cloudBlobDirectoryFrom(credentials, uri, dir); + return AzureUtilitiesV8.cloudBlobDirectoryFrom(credentials, uri, dir); } catch (URISyntaxException | StorageException e) { throw new IllegalArgumentException( "Could not connect to the Azure Storage. 
Please verify the path provided!"); @@ -190,8 +190,8 @@ public static CloudBlobDirectory createCloudBlobDirectory(String path, } public static List readRevisions(String uri) { - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { - SegmentNodeStorePersistence persistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, uri, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + SegmentNodeStorePersistence persistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, uri, azureStorageCredentialManagerV8); JournalFile journal = persistence.getJournalFile(); if (journal.exists()) { try (JournalReader journalReader = new JournalReader(journal)) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java index 24452d15ea9..14a3542468f 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java @@ -19,10 +19,8 @@ package org.apache.jackrabbit.oak.segment.azure.util; -import com.microsoft.azure.storage.RetryLinearRetry; -import com.microsoft.azure.storage.blob.BlobRequestOptions; - -import java.util.concurrent.TimeUnit; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; public class AzureRequestOptions { @@ -32,65 +30,54 @@ public class AzureRequestOptions { static final String RETRY_BACKOFF_PROP = "segment.azure.retry.backoff"; static final int DEFAULT_RETRY_BACKOFF_SECONDS = 5; - static final String TIMEOUT_EXECUTION_PROP = "segment.timeout.execution"; - static final int DEFAULT_TIMEOUT_EXECUTION = 30; - static final String 
TIMEOUT_INTERVAL_PROP = "segment.timeout.interval"; static final int DEFAULT_TIMEOUT_INTERVAL = 1; - static final String WRITE_TIMEOUT_EXECUTION_PROP = "segment.write.timeout.execution"; - static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval"; private AzureRequestOptions() { } - /** - * Apply default request options to the blobRequestOptions if they are not already set. - * @param blobRequestOptions - */ - public static void applyDefaultRequestOptions(BlobRequestOptions blobRequestOptions) { - if (blobRequestOptions.getRetryPolicyFactory() == null) { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); - if (retryAttempts > 0) { - Integer retryBackoffSeconds = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); - blobRequestOptions.setRetryPolicyFactory(new RetryLinearRetry((int) TimeUnit.SECONDS.toMillis(retryBackoffSeconds), retryAttempts)); - } - } - if (blobRequestOptions.getMaximumExecutionTimeInMs() == null) { - int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); - if (timeoutExecution > 0) { - blobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(timeoutExecution)); - } - } - if (blobRequestOptions.getTimeoutIntervalInMs() == null) { - int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); - if (timeoutInterval > 0) { - blobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(timeoutInterval)); - } - } + + public static RequestRetryOptions getRetryOptionsDefault() { + return getRetryOptionsDefault(null); + } + + public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { + int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + int tryTimeoutInSeconds = getReadTryTimeoutInSeconds(); + long retryDelayInMs = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS) * 1_000L; + long maxRetryDelayInMs = 
retryDelayInMs; + + return new RequestRetryOptions(RetryPolicyType.FIXED, + maxTries, + tryTimeoutInSeconds, + retryDelayInMs, + maxRetryDelayInMs, + secondaryHost); } /** - * Optimise the blob request options for write operations. This method does not change the original blobRequestOptions. - * This method also applies the default request options if they are not already set, by calling {@link #applyDefaultRequestOptions(BlobRequestOptions)} - * @param blobRequestOptions - * @return write optimised blobRequestOptions + * secondaryHost is null because there is no writer in secondary + * @return */ - public static BlobRequestOptions optimiseForWriteOperations(BlobRequestOptions blobRequestOptions) { - BlobRequestOptions writeOptimisedBlobRequestOptions = new BlobRequestOptions(blobRequestOptions); - applyDefaultRequestOptions(writeOptimisedBlobRequestOptions); - - Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP); - if (writeTimeoutExecution != null) { - writeOptimisedBlobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutExecution)); - } - - Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP); - if (writeTimeoutInterval != null) { - writeOptimisedBlobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutInterval)); - } + public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() { + int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + // if the value for write is not set use the read value + int tryTimeoutInSeconds = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, getReadTryTimeoutInSeconds()); + long retryDelayInMs = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS) * 1_000L; + long maxRetryDelayInMs = retryDelayInMs; + + return new RequestRetryOptions(RetryPolicyType.FIXED, + maxTries, + tryTimeoutInSeconds, + retryDelayInMs, + maxRetryDelayInMs, + null); + } - 
return writeOptimisedBlobRequestOptions; + private static int getReadTryTimeoutInSeconds() { + return Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); } -} + +} \ No newline at end of file diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java new file mode 100644 index 00000000000..59700d656f9 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.jackrabbit.oak.segment.azure.util; + +import com.microsoft.azure.storage.RetryLinearRetry; +import com.microsoft.azure.storage.blob.BlobRequestOptions; + +import java.util.concurrent.TimeUnit; + +public class AzureRequestOptionsV8 { + + static final String RETRY_ATTEMPTS_PROP = "segment.azure.retry.attempts"; + static final int DEFAULT_RETRY_ATTEMPTS = 5; + + static final String RETRY_BACKOFF_PROP = "segment.azure.retry.backoff"; + static final int DEFAULT_RETRY_BACKOFF_SECONDS = 5; + + static final String TIMEOUT_EXECUTION_PROP = "segment.timeout.execution"; + static final int DEFAULT_TIMEOUT_EXECUTION = 30; + + static final String TIMEOUT_INTERVAL_PROP = "segment.timeout.interval"; + static final int DEFAULT_TIMEOUT_INTERVAL = 1; + + static final String WRITE_TIMEOUT_EXECUTION_PROP = "segment.write.timeout.execution"; + + static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval"; + + private AzureRequestOptionsV8() { + } + + /** + * Apply default request options to the blobRequestOptions if they are not already set. 
+ * @param blobRequestOptions + */ + public static void applyDefaultRequestOptions(BlobRequestOptions blobRequestOptions) { + if (blobRequestOptions.getRetryPolicyFactory() == null) { + int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + if (retryAttempts > 0) { + Integer retryBackoffSeconds = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); + blobRequestOptions.setRetryPolicyFactory(new RetryLinearRetry((int) TimeUnit.SECONDS.toMillis(retryBackoffSeconds), retryAttempts)); + } + } + if (blobRequestOptions.getMaximumExecutionTimeInMs() == null) { + int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); + if (timeoutExecution > 0) { + blobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(timeoutExecution)); + } + } + if (blobRequestOptions.getTimeoutIntervalInMs() == null) { + int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); + if (timeoutInterval > 0) { + blobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(timeoutInterval)); + } + } + } + + /** + * Optimise the blob request options for write operations. This method does not change the original blobRequestOptions. 
+ * This method also applies the default request options if they are not already set, by calling {@link #applyDefaultRequestOptions(BlobRequestOptions)} + * @param blobRequestOptions + * @return write optimised blobRequestOptions + */ + public static BlobRequestOptions optimiseForWriteOperations(BlobRequestOptions blobRequestOptions) { + BlobRequestOptions writeOptimisedBlobRequestOptions = new BlobRequestOptions(blobRequestOptions); + applyDefaultRequestOptions(writeOptimisedBlobRequestOptions); + + Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP); + if (writeTimeoutExecution != null) { + writeOptimisedBlobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutExecution)); + } + + Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP); + if (writeTimeoutInterval != null) { + writeOptimisedBlobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutInterval)); + } + + return writeOptimisedBlobRequestOptions; + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java new file mode 100644 index 00000000000..792686fe8b9 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@Internal(since = "1.0.0") +@Version("2.0.0") +package org.apache.jackrabbit.oak.segment.azure.util; + +import org.apache.jackrabbit.oak.commons.annotations.Internal; +import org.osgi.annotation.versioning.Version; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java new file mode 100644 index 00000000000..d33c95b06f8 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobListingDetails; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.CopyStatus; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.UUID; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.apache.jackrabbit.oak.commons.conditions.Validate.checkArgument; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.getName; + +public class AzureArchiveManagerV8 implements SegmentArchiveManager { + + private static final Logger log = LoggerFactory.getLogger(AzureArchiveManagerV8.class); + + protected final CloudBlobDirectory cloudBlobDirectory; + + protected final IOMonitor ioMonitor; + + protected final FileStoreMonitor monitor; + private
WriteAccessController writeAccessController; + + public AzureArchiveManagerV8(CloudBlobDirectory segmentstoreDirectory, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { + this.cloudBlobDirectory = segmentstoreDirectory; + this.ioMonitor = ioMonitor; + this.monitor = fileStoreMonitor; + this.writeAccessController = writeAccessController; + } + + @Override + public List listArchives() throws IOException { + try { + List archiveNames = StreamSupport.stream(cloudBlobDirectory + .listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null) + .spliterator(), false) + .filter(i -> i instanceof CloudBlobDirectory) + .map(i -> (CloudBlobDirectory) i) + .filter(i -> getName(i).endsWith(".tar")) + .map(CloudBlobDirectory::getPrefix) + .map(Paths::get) + .map(Path::getFileName) + .map(Path::toString) + .collect(Collectors.toList()); + + Iterator it = archiveNames.iterator(); + while (it.hasNext()) { + String archiveName = it.next(); + if (isArchiveEmpty(archiveName)) { + delete(archiveName); + it.remove(); + } + } + return archiveNames; + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } + + /** + * Check if there's a valid 0000. 
segment in the archive + * @param archiveName + * @return true if the archive is empty (no 0000.* segment) + */ + private boolean isArchiveEmpty(String archiveName) throws IOException, URISyntaxException, StorageException { + return !getDirectory(archiveName).listBlobs("0000.").iterator().hasNext(); + } + + @Override + public SegmentArchiveReader open(String archiveName) throws IOException { + try { + CloudBlobDirectory archiveDirectory = getDirectory(archiveName); + if (!archiveDirectory.getBlockBlobReference("closed").exists()) { + return null; + } + return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor); + } catch (StorageException | URISyntaxException e) { + throw new IOException(e); + } + } + + @Override + public SegmentArchiveReader forceOpen(String archiveName) throws IOException { + CloudBlobDirectory archiveDirectory = getDirectory(archiveName); + return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor); + } + + @Override + public SegmentArchiveWriter create(String archiveName) throws IOException { + return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController); + } + + @Override + public boolean delete(String archiveName) { + try { + getBlobs(archiveName) + .forEach(cloudBlob -> { + try { + writeAccessController.checkWritingAllowed(); + cloudBlob.delete(); + } catch (StorageException e) { + log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + } + }); + return true; + } catch (IOException e) { + log.error("Can't delete archive {}", archiveName, e); + return false; + } + } + + @Override + public boolean renameTo(String from, String to) { + try { + CloudBlobDirectory targetDirectory = getDirectory(to); + getBlobs(from) + .forEach(cloudBlob -> { + try { + writeAccessController.checkWritingAllowed(); + renameBlob(cloudBlob, targetDirectory); + } catch (IOException e) { + log.error("Can't rename segment {}", cloudBlob.getUri().getPath(), e); + } + }); + return true; + } 
catch (IOException e) { + log.error("Can't rename archive {} to {}", from, to, e); + return false; + } + } + + @Override + public void copyFile(String from, String to) throws IOException { + CloudBlobDirectory targetDirectory = getDirectory(to); + getBlobs(from) + .forEach(cloudBlob -> { + try { + copyBlob(cloudBlob, targetDirectory); + } catch (IOException e) { + log.error("Can't copy segment {}", cloudBlob.getUri().getPath(), e); + } + }); + } + + @Override + public boolean exists(String archiveName) { + try { + return getDirectory(archiveName).listBlobsSegmented(null, false, null, 1, null, null, null).getLength() > 0; + } catch (IOException | StorageException | URISyntaxException e) { + log.error("Can't check the existence of {}", archiveName, e); + return false; + } + } + + @Override + public void recoverEntries(String archiveName, LinkedHashMap entries) throws IOException { + Pattern pattern = Pattern.compile(RemoteUtilities.SEGMENT_FILE_NAME_PATTERN); + List entryList = new ArrayList<>(); + + for (CloudBlob b : getBlobs(archiveName)) { + String name = getName(b); + Matcher m = pattern.matcher(name); + if (!m.matches()) { + continue; + } + int position = Integer.parseInt(m.group(1), 16); + UUID uuid = UUID.fromString(m.group(2)); + long length = b.getProperties().getLength(); + if (length > 0) { + byte[] data = new byte[(int) length]; + try { + b.downloadToByteArray(data, 0); + } catch (StorageException e) { + throw new IOException(e); + } + entryList.add(new RecoveredEntry(position, uuid, data, name)); + } + } + Collections.sort(entryList); + + int i = 0; + for (RecoveredEntry e : entryList) { + if (e.position != i) { + log.warn("Missing entry {}.??? when recovering {}. 
No more segments will be read.", String.format("%04X", i), archiveName); + break; + } + log.info("Recovering segment {}/{}", archiveName, e.fileName); + entries.put(e.uuid, e.data); + i++; + } + } + + private void delete(String archiveName, Set recoveredEntries) throws IOException { + getBlobs(archiveName) + .forEach(cloudBlob -> { + if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(cloudBlob)))) { + try { + cloudBlob.delete(); + } catch (StorageException e) { + log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + } + } + }); + } + + /** + * Method is not deleting segments from the directory given with {@code archiveName}, if they are in the set of recovered segments. + * Reason for that is because during execution of this method, remote repository can be accessed by another application, and deleting a valid segment can + * cause consistency issues there. + */ + @Override + public void backup(@NotNull String archiveName, @NotNull String backupArchiveName, @NotNull Set recoveredEntries) throws IOException { + copyFile(archiveName, backupArchiveName); + delete(archiveName, recoveredEntries); + } + + protected CloudBlobDirectory getDirectory(String archiveName) throws IOException { + try { + return cloudBlobDirectory.getDirectoryReference(archiveName); + } catch (URISyntaxException e) { + throw new IOException(e); + } + } + + private List getBlobs(String archiveName) throws IOException { + return AzureUtilitiesV8.getBlobs(getDirectory(archiveName)); + } + + private void renameBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { + copyBlob(blob, newParent); + try { + blob.delete(); + } catch (StorageException e) { + throw new IOException(e); + } + } + + private void copyBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { + checkArgument(blob instanceof CloudBlockBlob, "Only block blobs are supported for the rename"); + try { + String blobName = getName(blob); + CloudBlockBlob newBlob = 
newParent.getBlockBlobReference(blobName); + newBlob.startCopy(blob.getUri()); + + boolean isStatusPending = true; + while (isStatusPending) { + newBlob.downloadAttributes(); + if (newBlob.getCopyState().getStatus() == CopyStatus.PENDING) { + Thread.sleep(100); + } else { + isStatusPending = false; + } + } + + CopyStatus finalStatus = newBlob.getCopyState().getStatus(); + if (newBlob.getCopyState().getStatus() != CopyStatus.SUCCESS) { + throw new IOException("Invalid copy status for " + blob.getUri().getPath() + ": " + finalStatus); + } + } catch (StorageException | InterruptedException | URISyntaxException e) { + throw new IOException(e); + } + } + + private static class RecoveredEntry implements Comparable { + + private final byte[] data; + + private final UUID uuid; + + private final int position; + + private final String fileName; + + public RecoveredEntry(int position, UUID uuid, byte[] data, String fileName) { + this.data = data; + this.uuid = uuid; + this.position = position; + this.fileName = fileName; + } + + @Override + public int compareTo(RecoveredEntry o) { + return Integer.compare(this.position, o.position); + } + } + +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java new file mode 100644 index 00000000000..02d9c9f63ea --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.List; + +public class AzureGCJournalFileV8 implements GCJournalFile { + + private final CloudAppendBlob gcJournal; + + public AzureGCJournalFileV8(CloudAppendBlob gcJournal) { + this.gcJournal = gcJournal; + } + + @Override + public void writeLine(String line) throws IOException { + try { + if (!gcJournal.exists()) { + gcJournal.createOrReplace(); + } + gcJournal.appendText(line + "\n", StandardCharsets.UTF_8.name(), null, null, null); + } catch (StorageException e) { + throw new IOException(e); + } + } + + @Override + public List readLines() throws IOException { + try { + if (!gcJournal.exists()) { + return Collections.emptyList(); + } + byte[] data = new byte[(int) gcJournal.getProperties().getLength()]; + gcJournal.downloadToByteArray(data, 0); + return IOUtils.readLines(new ByteArrayInputStream(data), StandardCharsets.UTF_8); + } catch (StorageException e) { + throw new IOException(e); + } + } + + @Override + public void truncate() throws IOException { + try { + if (gcJournal.exists()) { + gcJournal.delete(); + } + } catch (StorageException e) { + throw new IOException(e); + } 
+ } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java new file mode 100644 index 00000000000..0ee8914f5d4 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java @@ -0,0 +1,327 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import org.apache.jackrabbit.guava.common.collect.ImmutableList; +import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.ListBlobItem; +import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; +import org.apache.jackrabbit.oak.segment.azure.util.CaseInsensitiveKeysMapAccess; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class AzureJournalFileV8 implements JournalFile { + + private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8.class); + + private static final int JOURNAL_LINE_LIMIT = Integer.getInteger("org.apache.jackrabbit.oak.segment.azure.journal.lines", 40_000); + + private final CloudBlobDirectory directory; + + private final String journalNamePrefix; + + private final int lineLimit; + + private final WriteAccessController writeAccessController; + + AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { + 
this.directory = directory; + this.journalNamePrefix = journalNamePrefix; + this.lineLimit = lineLimit; + this.writeAccessController = writeAccessController; + } + + public AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController) { + this(directory, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); + } + + @Override + public JournalFileReader openJournalReader() throws IOException { + return new CombinedReader(getJournalBlobs()); + } + + @Override + public JournalFileWriter openJournalWriter() throws IOException { + return new AzureJournalWriter(); + } + + @Override + public String getName() { + return journalNamePrefix; + } + + @Override + public boolean exists() { + try { + return !getJournalBlobs().isEmpty(); + } catch (IOException e) { + log.error("Can't check if the file exists", e); + return false; + } + } + + private String getJournalFileName(int index) { + return String.format("%s.%03d", journalNamePrefix, index); + } + + private List getJournalBlobs() throws IOException { + try { + List result = new ArrayList<>(); + for (ListBlobItem b : directory.listBlobs(journalNamePrefix)) { + if (b instanceof CloudAppendBlob) { + result.add((CloudAppendBlob) b); + } else { + log.warn("Invalid blob type: {} {}", b.getUri(), b.getClass()); + } + } + result.sort(Comparator.comparing(AzureUtilitiesV8::getName).reversed()); + return result; + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } + + private static class AzureJournalReader implements JournalFileReader { + + private final CloudBlob blob; + + private ReverseFileReaderV8 reader; + + private boolean metadataFetched; + + private boolean firstLineReturned; + + private AzureJournalReader(CloudBlob blob) { + this.blob = blob; + } + + @Override + public String readLine() throws IOException { + if (reader == null) { + try { + if (!metadataFetched) { + blob.downloadAttributes(); + metadataFetched = true; + Map 
metadata = CaseInsensitiveKeysMapAccess.convert(blob.getMetadata()); + if (metadata.containsKey("lastEntry")) { + firstLineReturned = true; + return metadata.get("lastEntry"); + } + } + reader = new ReverseFileReaderV8(blob); + if (firstLineReturned) { + while("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it + } + } catch (StorageException e) { + throw new IOException(e); + } + } + return reader.readLine(); + } + + @Override + public void close() throws IOException { + } + } + + private class AzureJournalWriter implements JournalFileWriter { + + private CloudAppendBlob currentBlob; + + private int lineCount; + + private final BlobRequestOptions writeOptimisedBlobRequestOptions; + + public AzureJournalWriter() throws IOException { + writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(directory.getServiceClient().getDefaultRequestOptions()); + + List blobs = getJournalBlobs(); + if (blobs.isEmpty()) { + try { + currentBlob = directory.getAppendBlobReference(getJournalFileName(1)); + currentBlob.createOrReplace(); + currentBlob.downloadAttributes(); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } else { + currentBlob = blobs.get(0); + } + try { + currentBlob.downloadAttributes(); + } catch (StorageException e) { + throw new IOException(e); + } + String lc = currentBlob.getMetadata().get("lineCount"); + lineCount = lc == null ? 
0 : Integer.parseInt(lc); + } + + @Override + public void truncate() throws IOException { + try { + writeAccessController.checkWritingAllowed(); + + for (CloudAppendBlob cloudAppendBlob : getJournalBlobs()) { + cloudAppendBlob.delete(DeleteSnapshotsOption.NONE, null, writeOptimisedBlobRequestOptions, null); + } + + createNextFile(0); + } catch (StorageException e) { + throw new IOException(e); + } + } + + @Override + public void writeLine(String line) throws IOException { + batchWriteLines(ImmutableList.of(line)); + } + + @Override + public void batchWriteLines(List lines) throws IOException { + writeAccessController.checkWritingAllowed(); + + if (lines.isEmpty()) { + return; + } + int firstBlockSize = Math.min(lineLimit - lineCount, lines.size()); + List firstBlock = lines.subList(0, firstBlockSize); + List> remainingBlocks = CollectionUtils.partitionList(lines.subList(firstBlockSize, lines.size()), lineLimit); + List> allBlocks = ImmutableList.>builder() + .addAll(firstBlock.isEmpty() ? 
ImmutableList.of() : ImmutableList.of(firstBlock)) + .addAll(remainingBlocks) + .build(); + + for (List entries : allBlocks) { + if (lineCount >= lineLimit) { + int parsedSuffix = parseCurrentSuffix(); + createNextFile(parsedSuffix); + } + StringBuilder text = new StringBuilder(); + for (String line : entries) { + text.append(line).append("\n"); + } + try { + currentBlob.appendText(text.toString(), null, null, writeOptimisedBlobRequestOptions, null); + currentBlob.getMetadata().put("lastEntry", entries.get(entries.size() - 1)); + lineCount += entries.size(); + currentBlob.getMetadata().put("lineCount", Integer.toString(lineCount)); + currentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); + } catch (StorageException e) { + throw new IOException(e); + } + } + } + + private void createNextFile(int suffix) throws IOException { + try { + currentBlob = directory.getAppendBlobReference(getJournalFileName(suffix + 1)); + currentBlob.createOrReplace(null, writeOptimisedBlobRequestOptions, null); + lineCount = 0; + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } + + private int parseCurrentSuffix() { + String name = AzureUtilitiesV8.getName(currentBlob); + Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)" ); + Matcher matcher = pattern.matcher(name); + int parsedSuffix; + if (matcher.find()) { + String suffix = matcher.group(1); + try { + parsedSuffix = Integer.parseInt(suffix); + } catch (NumberFormatException e) { + log.warn("Can't parse suffix for journal file {}", name); + parsedSuffix = 0; + } + } else { + log.warn("Can't parse journal file name {}", name); + parsedSuffix = 0; + } + return parsedSuffix; + } + + @Override + public void close() throws IOException { + // do nothing + } + } + + private static class CombinedReader implements JournalFileReader { + + private final Iterator readers; + + private JournalFileReader currentReader; + + private CombinedReader(List blobs) { + 
readers = blobs.stream().map(AzureJournalReader::new).iterator(); + } + + @Override + public String readLine() throws IOException { + String line; + do { + if (currentReader == null) { + if (!readers.hasNext()) { + return null; + } + currentReader = readers.next(); + } + do { + line = currentReader.readLine(); + } while ("".equals(line)); + if (line == null) { + currentReader.close(); + currentReader = null; + } + } while (line == null); + return line; + } + + @Override + public void close() throws IOException { + while (readers.hasNext()) { + readers.next().close(); + } + if (currentReader != null) { + currentReader.close(); + currentReader = null; + } + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java new file mode 100644 index 00000000000..28568c780f6 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Properties;

/**
 * {@link ManifestFile} backed by a single Azure block blob, accessed through
 * the legacy (v8) Azure Storage SDK. The manifest content is serialized as
 * {@link Properties}; every load and save is a full download or upload of the
 * blob.
 */
public class AzureManifestFileV8 implements ManifestFile {

    private static final Logger log = LoggerFactory.getLogger(AzureManifestFileV8.class);

    private final CloudBlockBlob manifestBlob;

    public AzureManifestFileV8(CloudBlockBlob manifestBlob) {
        this.manifestBlob = manifestBlob;
    }

    /**
     * @return {@code true} if the manifest blob exists; {@code false} if it
     *         does not, or if the existence check itself failed (the failure
     *         is logged and swallowed).
     */
    @Override
    public boolean exists() {
        try {
            return manifestBlob.exists();
        } catch (StorageException e) {
            log.error("Can't check if the manifest exists", e);
            return false;
        }
    }

    /**
     * Downloads the manifest blob and parses its content as {@link Properties}.
     * A missing manifest yields an empty properties object.
     *
     * @throws IOException if the download fails
     */
    @Override
    public Properties load() throws IOException {
        Properties loaded = new Properties();
        if (!exists()) {
            return loaded;
        }
        int size = (int) manifestBlob.getProperties().getLength();
        byte[] content = new byte[size];
        try {
            manifestBlob.downloadToByteArray(content, 0);
        } catch (StorageException e) {
            throw new IOException(e);
        }
        loaded.load(new ByteArrayInputStream(content));
        return loaded;
    }

    /**
     * Serializes the given properties and overwrites the manifest blob with
     * the result.
     *
     * @throws IOException if the upload fails
     */
    @Override
    public void save(Properties properties) throws IOException {
        ByteArrayOutputStream serialized = new ByteArrayOutputStream();
        properties.store(serialized, null);
        byte[] content = serialized.toByteArray();
        try {
            manifestBlob.uploadFromByteArray(content, 0, content.length);
        } catch (StorageException e) {
            throw new IOException(e);
        }
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Paths;
import java.util.Date;
import java.util.EnumSet;
import java.util.concurrent.TimeUnit;

import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RequestCompletedEvent;
import com.microsoft.azure.storage.StorageEvent;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.CloudAppendBlob;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.blob.ListBlobItem;
import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;
import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor;
import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile;
import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile;
import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile;
import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@link SegmentNodeStorePersistence} implementation that keeps the segment
 * store in an Azure Blob Storage directory, using the legacy (v8) Azure SDK.
 * All well-known store files (journal, GC journal, manifest, repository lock)
 * live directly under {@link #segmentstoreDirectory}.
 */
public class AzurePersistenceV8 implements SegmentNodeStorePersistence {
    private static final Logger log = LoggerFactory.getLogger(AzurePersistenceV8.class);

    protected final CloudBlobDirectory segmentstoreDirectory;

    // Gatekeeper shared with all writers; toggled by the repository lock's
    // lease-renewal loop to block writes when the lease may have been lost.
    protected WriteAccessController writeAccessController = new WriteAccessController();

    public AzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) {
        this.segmentstoreDirectory = segmentStoreDirectory;
        // Install the module-wide default request options (timeouts, retry
        // policy) on the shared service client.
        AzureRequestOptionsV8.applyDefaultRequestOptions(segmentStoreDirectory.getServiceClient().getDefaultRequestOptions());
    }

    /**
     * Creates an archive manager for this store, additionally wiring the given
     * {@code remoteStoreMonitor} into the global Azure request event handler.
     * {@code mmap} and {@code offHeapAccess} are not applicable to a remote
     * store and are ignored.
     */
    @Override
    public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) {
        attachRemoteStoreMonitor(remoteStoreMonitor);
        return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController);
    }

    /**
     * @return {@code true} when at least one {@code *.tar} archive directory
     *         exists under the segment store directory; {@code false}
     *         otherwise, or when the listing fails (the failure is logged).
     */
    @Override
    public boolean segmentFilesExist() {
        try {
            for (ListBlobItem item : segmentstoreDirectory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)) {
                if (!(item instanceof CloudBlobDirectory)) {
                    continue;
                }
                String dirName = Paths.get(((CloudBlobDirectory) item).getPrefix()).getFileName().toString();
                if (dirName.endsWith(".tar")) {
                    return true;
                }
            }
            return false;
        } catch (StorageException | URISyntaxException e) {
            log.error("Can't check if the segment archives exists", e);
            return false;
        }
    }

    @Override
    public JournalFile getJournalFile() {
        return new AzureJournalFileV8(segmentstoreDirectory, "journal.log", writeAccessController);
    }

    @Override
    public GCJournalFile getGCJournalFile() throws IOException {
        return new AzureGCJournalFileV8(getAppendBlob("gc.log"));
    }

    @Override
    public ManifestFile getManifestFile() throws IOException {
        return new AzureManifestFileV8(getBlockBlob("manifest"));
    }

    /**
     * Acquires the repository lock (an Azure blob lease on {@code repo.lock})
     * and returns it already locked.
     */
    @Override
    public RepositoryLock lockRepository() throws IOException {
        Runnable onLostConnection = () -> {
            log.warn("Lost connection to the Azure. The client will be closed.");
            // TODO close the connection
        };
        return new AzureRepositoryLockV8(getBlockBlob("repo.lock"), onLostConnection, writeAccessController).lock();
    }

    // Resolves a block-blob reference relative to the segment store directory.
    private CloudBlockBlob getBlockBlob(String path) throws IOException {
        try {
            return segmentstoreDirectory.getBlockBlobReference(path);
        } catch (URISyntaxException | StorageException e) {
            throw new IOException(e);
        }
    }

    // Resolves an append-blob reference relative to the segment store directory.
    private CloudAppendBlob getAppendBlob(String path) throws IOException {
        try {
            return segmentstoreDirectory.getAppendBlobReference(path);
        } catch (URISyntaxException | StorageException e) {
            throw new IOException(e);
        }
    }

    // Forwards per-request duration and success/error counts of every Azure
    // request in this JVM to the monitor. NOTE(review): the listener is added
    // to the global handler and never removed, so repeated calls accumulate
    // listeners — confirm this matches the caller's lifecycle expectations.
    private static void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) {
        OperationContext.getGlobalRequestCompletedEventHandler().addListener(new StorageEvent<RequestCompletedEvent>() {

            @Override
            public void eventOccurred(RequestCompletedEvent event) {
                Date started = event.getRequestResult().getStartDate();
                Date finished = event.getRequestResult().getStopDate();

                if (started != null && finished != null) {
                    remoteStoreMonitor.requestDuration(finished.getTime() - started.getTime(), TimeUnit.MILLISECONDS);
                }

                if (event.getRequestResult().getException() == null) {
                    remoteStoreMonitor.requestCount();
                } else {
                    remoteStoreMonitor.requestError();
                }
            }

        });
    }

    public CloudBlobDirectory getSegmentstoreDirectory() {
        return segmentstoreDirectory;
    }

    public void setWriteAccessController(WriteAccessController writeAccessController) {
        this.writeAccessController = writeAccessController;
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.Constants;
import com.microsoft.azure.storage.RetryNoRetry;
import com.microsoft.azure.storage.StorageErrorCode;
import com.microsoft.azure.storage.StorageErrorCodeStrings;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Set;

/**
 * {@link RepositoryLock} implemented as an Azure blob lease (legacy v8 SDK).
 * <p>
 * {@link #lock()} acquires a lease on the lock blob and starts a daemon
 * refresher thread that renews the lease every {@link #renewalInterval}
 * seconds. If renewal keeps failing for longer than
 * {@link #timeToWaitBeforeWriteBlock} seconds, writes are blocked through the
 * shared {@link WriteAccessController} until a renewal succeeds again.
 */
public class AzureRepositoryLockV8 implements RepositoryLock {

    private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8.class);

    // How long lock() keeps retrying acquisition; 0 means a single attempt.
    private static final int TIMEOUT_SEC = Integer.getInteger("oak.segment.azure.lock.timeout", 0);
    // Per-request cap on a single renewLease call, so a hung request cannot
    // stall the renewal loop past the write-block deadline.
    private static final Integer LEASE_RENEWAL_TIMEOUT_MS = 5000;

    public static final String LEASE_DURATION_PROP = "oak.segment.azure.lock.leaseDurationInSec";
    private final int leaseDuration = Integer.getInteger(LEASE_DURATION_PROP, 60);

    public static final String RENEWAL_INTERVAL_PROP = "oak.segment.azure.lock.leaseRenewalIntervalInSec";
    private final int renewalInterval = Integer.getInteger(RENEWAL_INTERVAL_PROP, 5);

    public static final String TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP = "oak.segment.azure.lock.blockWritesAfterInSec";
    private final int timeToWaitBeforeWriteBlock = Integer.getInteger(TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, 20);

    // Invoked when the lease is irrecoverably lost (non-storage failure).
    private final Runnable shutdownHook;

    private final CloudBlockBlob blob;

    private final Thread refresherThread;

    private static final String REFRESHER_THREAD_NAME = "AzureRepositoryLock-Refresher";

    // True while the renewal loop is recovering from an unexpected error;
    // reflected in the refresher thread's name for diagnosability.
    private boolean inError;

    private final int timeoutSec;

    private WriteAccessController writeAccessController;

    // Lease id returned by Azure; null until acquired and after release.
    private String leaseId;

    // Flag that keeps the renewal loop running; cleared by unlock().
    private volatile boolean doUpdate;

    public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController) {
        this(blob, shutdownHook, writeAccessController, TIMEOUT_SEC);
    }

    /**
     * @param blob the blob to lease; created empty on {@link #lock()}
     * @param shutdownHook run when the lease cannot be renewed at all
     * @param writeAccessController shared gate used to block/unblock writers
     * @param timeoutSec how long {@link #lock()} retries acquisition
     * @throws IllegalStateException if the configured intervals are not
     *         ordered leaseDuration &gt; blockWritesAfter &gt; renewalInterval
     */
    public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) {
        this.shutdownHook = shutdownHook;
        this.blob = blob;
        this.refresherThread = new Thread(this::refreshLease, REFRESHER_THREAD_NAME);
        this.refresherThread.setDaemon(true);
        this.timeoutSec = timeoutSec;
        this.writeAccessController = writeAccessController;

        if (leaseDuration < timeToWaitBeforeWriteBlock || timeToWaitBeforeWriteBlock < renewalInterval) {
            throw new IllegalStateException(String.format("The value of %s must be greater than %s and the value of %s must be greater than %s",
                    LEASE_DURATION_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, RENEWAL_INTERVAL_PROP));
        }
    }

    /**
     * Creates the lock blob (if needed) and acquires a lease on it, retrying
     * once per second until {@link #timeoutSec} elapses. On success, enables
     * writing and starts the renewal thread.
     *
     * @return this lock, already held
     * @throws IOException if the lease cannot be acquired within the timeout
     */
    public AzureRepositoryLockV8 lock() throws IOException {
        long start = System.currentTimeMillis();
        Exception ex = null;
        do {
            try {
                // Ensure the blob exists; leasing requires an existing blob.
                blob.openOutputStream().close();

                log.info("{} = {}", LEASE_DURATION_PROP, leaseDuration);
                log.info("{} = {}", RENEWAL_INTERVAL_PROP, renewalInterval);
                log.info("{} = {}", TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, timeToWaitBeforeWriteBlock);

                leaseId = blob.acquireLease(leaseDuration, null);
                writeAccessController.enableWriting();
                log.info("Acquired lease {}", leaseId);
            } catch (Exception e) {
                if (ex == null) {
                    log.info("Can't acquire the lease. Retrying every 1s. Timeout is set to {}s.", timeoutSec);
                }
                ex = e;
                if ((System.currentTimeMillis() - start) / 1000 < timeoutSec) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e1) {
                        throw new IOException(e1);
                    }
                } else {
                    break;
                }
            }
        } while (leaseId == null);
        if (leaseId == null) {
            log.error("Can't acquire the lease in {}s.", timeoutSec);
            throw new IOException(ex);
        } else {
            refresherThread.start();
            return this;
        }
    }

    /**
     * Renewal loop run by the refresher thread. Renews the lease whenever the
     * last successful renewal is older than {@link #renewalInterval} seconds.
     * Transient storage/client errors are retried; if they persist beyond
     * {@link #timeToWaitBeforeWriteBlock} seconds writes are blocked. Any
     * non-storage exception is treated as fatal: the shutdown hook runs and
     * the loop exits.
     */
    private void refreshLease() {
        log.info("Starting the lease renewal loop");
        doUpdate = true;
        long lastUpdate = 0;
        setInError(false);
        while (doUpdate) {
            try {
                long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000;
                try {
                    if (timeSinceLastUpdate > renewalInterval) {

                        // Renew with a hard per-request timeout and no SDK
                        // retries, so failures surface quickly to this loop.
                        BlobRequestOptions requestOptions = new BlobRequestOptions();
                        requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS);
                        requestOptions.setRetryPolicyFactory(new RetryNoRetry());
                        blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null);

                        writeAccessController.enableWriting();
                        if (isInError()) {
                            log.info("Lease renewal successful again.");
                            setInError(false);
                        }
                        lastUpdate = System.currentTimeMillis();
                    }
                } catch (Exception e) {
                    timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000;

                    // Lease may expire soon — stop writers until we manage to
                    // renew again.
                    if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) {
                        writeAccessController.disableWriting();
                    }

                    if (e instanceof StorageException) {
                        StorageException storageException = (StorageException) e;
                        // NOTE(review): SERVICE_INTERNAL_ERROR is a
                        // StorageErrorCode enum constant while the other three
                        // entries (and getErrorCode()) are Strings, so that
                        // set element can never match — confirm whether
                        // StorageErrorCodeStrings.INTERNAL_ERROR was intended
                        // to cover it.
                        if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT
                                , StorageErrorCode.SERVICE_INTERNAL_ERROR
                                , StorageErrorCodeStrings.SERVER_BUSY
                                , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) {
                            log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e);
                        } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) {
                            // 306 is used by the v8 SDK as a marker for
                            // client-side failures.
                            log.warn("Client side error. Retry in progress ...", e);
                        } else {
                            log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e);
                        }
                    } else {
                        // Non-storage failure: assume the lease is lost.
                        log.error("Can't renew the lease", e);
                        shutdownHook.run();
                        doUpdate = false;
                        return;
                    }
                }
                waitABit(100);
            } catch (Throwable t) {
                // Last-resort guard: keep the renewal thread alive.
                if (!isInError()) {
                    log.error("Unexpected error in the lease renewal loop, trying to recover", t);
                    setInError(true);
                }
                waitABit(100);
            }
        }
        log.info("Lease renewal loop exiting.");
    }

    /**
     * Stops the renewal loop, waits up to 60s for it to exit, then releases
     * the lease and deletes the lock blob.
     */
    @Override
    public void unlock() throws IOException {
        doUpdate = false;
        try {
            refresherThread.join(60000);
        } catch (InterruptedException e) {
            throw new IOException(e);
        } finally {
            releaseLease();
        }
    }

    // Releases the lease and removes the lock blob; clears leaseId on success.
    private void releaseLease() throws IOException {
        try {
            blob.releaseLease(AccessCondition.generateLeaseCondition(leaseId));
            blob.delete();
            log.info("Released lease {}", leaseId);
            leaseId = null;
        } catch (StorageException e) {
            throw new IOException(e);
        }
    }

    // Records the error state and mirrors it in the thread name so the state
    // is visible in thread dumps.
    private void setInError(boolean inError) {
        this.inError = inError;
        refresherThread.setName(REFRESHER_THREAD_NAME + (inError ? "-InError" : ""));
    }

    private boolean isInError() {
        return inError;
    }

    // Sleep helper; interruption is deliberately ignored (best-effort pacing).
    private void waitABit(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // ignore
        }
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully;

import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.UUID;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata;
import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader;
import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;

/**
 * Segment archive reader over an Azure blob directory (legacy v8 SDK). On
 * construction the archive index is built from the per-blob metadata and the
 * total archive size is computed.
 */
public class AzureSegmentArchiveReaderV8 extends AbstractRemoteSegmentArchiveReader {

    private final CloudBlobDirectory archiveDirectory;

    // Total size in bytes of all blobs in the archive, fixed at construction.
    private final long length;

    protected AzureSegmentArchiveReaderV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor) throws IOException {
        super(ioMonitor);
        this.archiveDirectory = archiveDirectory;
        this.length = computeArchiveIndexAndLength();
    }

    @Override
    public long length() {
        return length;
    }

    @Override
    public String getName() {
        return AzureUtilitiesV8.getName(archiveDirectory);
    }

    /**
     * Scans every blob of the archive once: segment blobs (recognized by their
     * metadata) are added to the index, and all blob sizes are summed up.
     *
     * @return the total archive size in bytes
     */
    @Override
    protected long computeArchiveIndexAndLength() throws IOException {
        long total = 0;
        for (CloudBlob blob : AzureUtilitiesV8.getBlobs(archiveDirectory)) {
            long blobLength = blob.getProperties().getLength();
            Map<String, String> metadata = blob.getMetadata();
            if (AzureBlobMetadata.isSegment(metadata)) {
                RemoteSegmentArchiveEntry entry = AzureBlobMetadata.toIndexEntry(metadata, (int) blobLength);
                index.put(new UUID(entry.getMsb(), entry.getLsb()), entry);
            }
            total += blobLength;
        }
        return total;
    }

    @Override
    protected void doReadSegmentToBuffer(String segmentFileName, Buffer buffer) throws IOException {
        readBufferFully(getBlob(segmentFileName), buffer);
    }

    @Override
    protected Buffer doReadDataFile(String extension) throws IOException {
        return readBlob(getName() + extension);
    }

    @Override
    protected File archivePathAsFile() {
        return new File(archiveDirectory.getUri().getPath());
    }

    // Resolves a block-blob reference inside the archive directory.
    private CloudBlockBlob getBlob(String name) throws IOException {
        try {
            return archiveDirectory.getBlockBlobReference(name);
        } catch (URISyntaxException | StorageException e) {
            throw new IOException(e);
        }
    }

    // Reads a whole blob into a heap buffer, or returns null if it is missing.
    private Buffer readBlob(String name) throws IOException {
        try {
            CloudBlockBlob blob = getBlob(name);
            if (!blob.exists()) {
                return null;
            }
            Buffer content = Buffer.allocate((int) blob.getProperties().getLength());
            AzureUtilitiesV8.readBufferFully(blob, content);
            return content;
        } catch (StorageException e) {
            throw new IOException(e);
        }
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully;
import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName;
import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP;

import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.concurrent.TimeUnit;

import com.microsoft.azure.storage.blob.BlobRequestOptions;
import org.apache.jackrabbit.guava.common.base.Stopwatch;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata;
import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.azure.util.Retrier;
import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveWriter;
import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry;
import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;

/**
 * Segment archive writer backed by an Azure blob directory (legacy v8 SDK).
 * Each segment becomes one block blob annotated with index metadata; data
 * files and the final "closed" marker are uploaded with a configurable retry.
 */
public class AzureSegmentArchiveWriterV8 extends AbstractRemoteSegmentArchiveWriter {

    private final CloudBlobDirectory archiveDirectory;

    // Retry policy for data-file and close-marker uploads; tunable through
    // the azure.segment.archive.writer.retries.* system properties.
    private final Retrier retrier = Retrier.withParams(
            Integer.getInteger("azure.segment.archive.writer.retries.max", 16),
            Integer.getInteger("azure.segment.archive.writer.retries.intervalMs", 5000)
    );

    private final BlobRequestOptions writeOptimisedBlobRequestOptions;

    public AzureSegmentArchiveWriterV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) {
        super(ioMonitor, monitor);
        this.archiveDirectory = archiveDirectory;
        this.writeAccessController = writeAccessController;
        this.writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions());
    }

    @Override
    public String getName() {
        return AzureUtilitiesV8.getName(archiveDirectory);
    }

    /**
     * Uploads one segment as a block blob, tagging it with the index-entry
     * metadata, and reports the write to the {@code ioMonitor}. Blocks if
     * writing is currently disabled by the {@link WriteAccessController}.
     */
    @Override
    protected void doWriteArchiveEntry(RemoteSegmentArchiveEntry indexEntry, byte[] data, int offset, int size) throws IOException {

        writeAccessController.checkWritingAllowed();

        String segmentName = getSegmentFileName(indexEntry);
        CloudBlockBlob segmentBlob = getBlob(segmentName);
        File monitorPath = new File(segmentBlob.getName());

        ioMonitor.beforeSegmentWrite(monitorPath, indexEntry.getMsb(), indexEntry.getLsb(), size);
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            segmentBlob.setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry));
            segmentBlob.uploadFromByteArray(data, offset, size, null, writeOptimisedBlobRequestOptions, null);
            segmentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null);
        } catch (StorageException e) {
            throw new IOException(e);
        }
        ioMonitor.afterSegmentWrite(monitorPath, indexEntry.getMsb(), indexEntry.getLsb(), size, stopwatch.elapsed(TimeUnit.NANOSECONDS));
    }

    /**
     * Reads back a previously written segment into a freshly allocated buffer
     * (direct or heap, depending on the {@code OFF_HEAP} setting).
     */
    @Override
    protected Buffer doReadArchiveEntry(RemoteSegmentArchiveEntry indexEntry) throws IOException {
        int entryLength = indexEntry.getLength();
        Buffer buffer = OFF_HEAP ? Buffer.allocateDirect(entryLength) : Buffer.allocate(entryLength);
        readBufferFully(getBlob(getSegmentFileName(indexEntry)), buffer);
        return buffer;
    }

    /**
     * Uploads an auxiliary data file (e.g. index/graph/binary references) as
     * {@code <archiveName><extension>}, retrying on failure.
     */
    @Override
    protected void doWriteDataFile(byte[] data, String extension) throws IOException {
        retrier.execute(() -> {
            try {
                writeAccessController.checkWritingAllowed();

                getBlob(getName() + extension).uploadFromByteArray(data, 0, data.length, null, writeOptimisedBlobRequestOptions, null);
            } catch (StorageException e) {
                throw new IOException(e);
            }
        });
    }

    /**
     * Marks the archive as complete by uploading an empty "closed" blob,
     * retrying on failure.
     */
    @Override
    protected void afterQueueClosed() throws IOException {
        retrier.execute(() -> {
            try {
                writeAccessController.checkWritingAllowed();

                getBlob("closed").uploadFromByteArray(new byte[0], 0, 0, null, writeOptimisedBlobRequestOptions, null);
            } catch (StorageException e) {
                throw new IOException(e);
            }
        });
    }

    @Override
    protected void afterQueueFlushed() {
        // do nothing
    }

    // Resolves a block-blob reference inside the archive directory.
    private CloudBlockBlob getBlob(String name) throws IOException {
        try {
            return archiveDirectory.getBlockBlobReference(name);
        } catch (URISyntaxException | StorageException e) {
            throw new IOException(e);
        }
    }
}
You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import org.apache.commons.lang3.StringUtils;
import org.apache.jackrabbit.oak.segment.azure.Configuration;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;

/**
 * Factory for {@link AzurePersistenceV8} instances built on the legacy (v8)
 * Azure Storage SDK. The authentication mechanism is chosen from the supplied
 * {@link Configuration} in priority order: explicit connection URL, service
 * principal credentials, shared access signature, account access key.
 */
public class AzureSegmentStoreV8 {

    private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreV8.class);

    public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net";

    // Keeps the credential manager (and its background token-refresh machinery)
    // reachable for the lifetime of a persistence created from service principal
    // credentials. NOTE(review): a second call to
    // createPersistenceFromServicePrincipalCredentials() overwrites the previous
    // instance without closing it (it is Closeable) -- confirm whether the old
    // manager must be closed or its ownership handed to the created persistence.
    private static AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8;

    /**
     * Creates an {@link AzurePersistenceV8} from the given configuration.
     *
     * @param configuration segment store configuration; decides which credential
     *                      mechanism is used based on which properties are non-blank
     * @return a new persistence instance backed by the configured container
     * @throws IOException if the storage account cannot be parsed or reached
     */
    public static AzurePersistenceV8 createAzurePersistenceFrom(Configuration configuration) throws IOException {
        if (!StringUtils.isBlank(configuration.connectionURL())) {
            return createPersistenceFromConnectionURL(configuration);
        }
        if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) {
            return createPersistenceFromServicePrincipalCredentials(configuration);
        }
        if (!StringUtils.isBlank(configuration.sharedAccessSignature())) {
            return createPersistenceFromSasUri(configuration);
        }
        return createPersistenceFromAccessKey(configuration);
    }

    /** Builds a persistence authenticated with the account name/access key pair. */
    private static AzurePersistenceV8 createPersistenceFromAccessKey(Configuration configuration) throws IOException {
        StringBuilder connectionString = new StringBuilder();
        connectionString.append("DefaultEndpointsProtocol=https;");
        connectionString.append("AccountName=").append(configuration.accountName()).append(';');
        connectionString.append("AccountKey=").append(configuration.accessKey()).append(';');
        if (!StringUtils.isBlank(configuration.blobEndpoint())) {
            connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';');
        }
        // A v12 BlobContainerClient used to be constructed here but was never
        // read; the v8 client created in createAzurePersistence() is the one
        // actually used, so the dead construction (and its imports) was removed.
        return createAzurePersistence(connectionString.toString(), configuration, true);
    }

    /** Builds a persistence authenticated with a shared access signature (SAS). */
    private static AzurePersistenceV8 createPersistenceFromSasUri(Configuration configuration) throws IOException {
        StringBuilder connectionString = new StringBuilder();
        connectionString.append("DefaultEndpointsProtocol=https;");
        connectionString.append("AccountName=").append(configuration.accountName()).append(';');
        connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';');
        if (!StringUtils.isBlank(configuration.blobEndpoint())) {
            connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';');
        }
        // A SAS may not carry the right to create containers, so do not attempt it.
        return createAzurePersistence(connectionString.toString(), configuration, false);
    }

    /** Builds a persistence from a fully specified connection URL. */
    @NotNull
    private static AzurePersistenceV8 createPersistenceFromConnectionURL(Configuration configuration) throws IOException {
        return createAzurePersistence(configuration.connectionURL(), configuration, true);
    }

    /** Builds a persistence authenticated via Azure AD service principal credentials. */
    @NotNull
    private static AzurePersistenceV8 createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException {
        azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8();
        StorageCredentials storageCredentialsToken = azureStorageCredentialManagerV8.getStorageCredentialAccessTokenFromServicePrincipals(configuration.accountName(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId());

        try {
            CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, configuration.accountName());
            return createAzurePersistence(cloud, configuration, true);
        } catch (StorageException | URISyntaxException e) {
            throw new IOException(e);
        }
    }

    /** Parses the connection string and delegates to the account-based factory. */
    @NotNull
    private static AzurePersistenceV8 createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException {
        try {
            CloudStorageAccount cloud = CloudStorageAccount.parse(connectionString);
            // Logs the parsed account, not the raw connection string.
            // NOTE(review): presumably CloudStorageAccount#toString masks the
            // account key -- verify no secret can reach the log at INFO level.
            log.info("Connection string: '{}'", cloud);
            return createAzurePersistence(cloud, configuration, createContainer);
        } catch (StorageException | URISyntaxException | InvalidKeyException e) {
            throw new IOException(e);
        }
    }

    /**
     * Creates the persistence from an already-authenticated storage account,
     * optionally creating the target container if it does not exist.
     */
    @NotNull
    private static AzurePersistenceV8 createAzurePersistence(CloudStorageAccount cloud, Configuration configuration, boolean createContainer) throws URISyntaxException, StorageException {
        CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient();
        BlobRequestOptions blobRequestOptions = new BlobRequestOptions();

        if (configuration.enableSecondaryLocation()) {
            // Fall back to the secondary (read-access geo-redundant) location
            // when the primary is unavailable.
            blobRequestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY);
        }
        cloudBlobClient.setDefaultRequestOptions(blobRequestOptions);

        CloudBlobContainer container = cloudBlobClient.getContainerReference(configuration.containerName());
        if (createContainer && !container.exists()) {
            container.create();
        }
        String path = normalizePath(configuration.rootPath());
        return new AzurePersistenceV8(container.getDirectoryReference(path));
    }

    /** Strips a single leading '/' so the root path is a valid blob directory name. */
    @NotNull
    private static String normalizePath(@NotNull String rootPath) {
        if (rootPath.length() > 0 && rootPath.charAt(0) == '/') {
            return rootPath.substring(1);
        }
        return rootPath;
    }

}
*/ -package org.apache.jackrabbit.oak.segment.azure; +package org.apache.jackrabbit.oak.segment.azure.v8; import com.azure.core.credential.AccessToken; import com.azure.core.credential.TokenRequestContext; @@ -39,13 +39,13 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; -public class AzureStorageCredentialManager implements Closeable { - private static final Logger log = LoggerFactory.getLogger(AzureStorageCredentialManager.class); +public class AzureStorageCredentialManagerV8 implements Closeable { + private static final Logger log = LoggerFactory.getLogger(AzureStorageCredentialManagerV8.class); private static final String AZURE_DEFAULT_SCOPE = "https://storage.azure.com/.default"; private static final long TOKEN_REFRESHER_INITIAL_DELAY = 45L; private static final long TOKEN_REFRESHER_DELAY = 1L; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java new file mode 100644 index 00000000000..382eef83ac2 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java @@ -0,0 +1,196 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.ResultContinuation;
import com.microsoft.azure.storage.ResultSegment;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.StorageUri;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.LeaseStatus;
import com.microsoft.azure.storage.blob.ListBlobItem;
import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Paths;
import java.security.InvalidKeyException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

/**
 * Static helpers for working with blobs through the legacy (v8) Azure Storage SDK:
 * listing, reading, deleting and resolving blob directories.
 */
public final class AzureUtilitiesV8 {

    // Environment/property names used to configure Azure credentials.
    public static final String AZURE_ACCOUNT_NAME = "AZURE_ACCOUNT_NAME";
    public static final String AZURE_SECRET_KEY = "AZURE_SECRET_KEY";
    public static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
    public static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
    public static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";

    private static final Logger log = LoggerFactory.getLogger(AzureUtilitiesV8.class);

    private AzureUtilitiesV8() {
        // utility class, not instantiable
    }

    /** Returns the last path segment of the blob's name. */
    public static String getName(CloudBlob blob) {
        return Paths.get(blob.getName()).getFileName().toString();
    }

    /** Returns the last path segment of the directory's URI path. */
    public static String getName(CloudBlobDirectory directory) {
        return Paths.get(directory.getUri().getPath()).getFileName().toString();
    }

    /**
     * Lists all blobs directly contained in the given directory, transparently
     * following continuation tokens (results arrive in pages of 5000).
     *
     * @throws IOException if listing fails after all retries
     */
    public static List<CloudBlob> getBlobs(CloudBlobDirectory directory) throws IOException {
        List<CloudBlob> blobList = new ArrayList<>();
        ResultContinuation token = null;
        do {
            ResultSegment<ListBlobItem> result = listBlobsInSegments(directory, token);
            for (ListBlobItem b : result.getResults()) {
                // Sub-directories can also appear in the listing; keep blobs only.
                if (b instanceof CloudBlob) {
                    blobList.add((CloudBlob) b);
                }
            }
            token = result.getContinuationToken();
        } while (token != null);
        return blobList;
    }

    /**
     * Downloads the blob's full content into {@code buffer} and flips it for reading.
     *
     * @throws FileNotFoundException if the blob does not exist (HTTP 404)
     * @throws RepositoryNotReachableException for any other storage failure
     */
    public static void readBufferFully(CloudBlob blob, Buffer buffer) throws IOException {
        try {
            blob.download(new ByteBufferOutputStream(buffer));
            buffer.flip();
        } catch (StorageException e) {
            if (e.getHttpStatusCode() == 404) {
                log.error("Blob not found in the remote repository: {}", blob.getName());
                throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getName());
            }
            throw new RepositoryNotReachableException(e);
        }
    }

    /** Best-effort deletion of every blob in the directory; failures are logged, not thrown. */
    public static void deleteAllEntries(CloudBlobDirectory directory) throws IOException {
        getBlobs(directory).forEach(b -> {
            try {
                b.deleteIfExists();
            } catch (StorageException e) {
                log.error("Can't delete blob {}", b.getUri().getPath(), e);
            }
        });
    }

    /**
     * Resolves (creating the container if needed) a blob directory from explicit
     * credentials and a container URI.
     */
    public static CloudBlobDirectory cloudBlobDirectoryFrom(StorageCredentials credentials,
                                                            String uri, String dir) throws URISyntaxException, StorageException {
        StorageUri storageUri = new StorageUri(new URI(uri));
        CloudBlobContainer container = new CloudBlobContainer(storageUri, credentials);

        container.createIfNotExists();

        return container.getDirectoryReference(dir);
    }

    /**
     * Resolves (creating the container if needed) a blob directory from a
     * connection string and container name.
     */
    public static CloudBlobDirectory cloudBlobDirectoryFrom(String connection, String containerName,
                                                            String dir) throws InvalidKeyException, URISyntaxException, StorageException {
        CloudStorageAccount cloud = CloudStorageAccount.parse(connection);
        CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(containerName);
        container.createIfNotExists();

        return container.getDirectoryReference(dir);
    }

    /**
     * Fetches one page (up to 5000 items) of a segmented listing, retrying with
     * exponential backoff (10ms .. 10s) on transient failures.
     */
    private static ResultSegment<ListBlobItem> listBlobsInSegments(CloudBlobDirectory directory,
                                                                   ResultContinuation token) throws IOException {
        ResultSegment<ListBlobItem> result = null;
        IOException lastException = null;
        for (int sleep = 10; sleep <= 10000; sleep *= 10) {
            try {
                result = directory.listBlobsSegmented(
                        null,
                        false,
                        EnumSet.of(BlobListingDetails.METADATA),
                        5000,
                        token,
                        null,
                        null);
                break; // we have the results, no need to retry
            } catch (StorageException | URISyntaxException e) {
                lastException = new IOException(e);
                try {
                    Thread.sleep(sleep); // back off before the next attempt
                } catch (InterruptedException ex) {
                    // Restore the interrupt flag and abort the retry loop:
                    // continuing to back off after an interrupt is pointless.
                    // (The original swallowed the interrupt and logged the wrong
                    // exception variable.)
                    Thread.currentThread().interrupt();
                    throw new IOException("Interrupted while retrying blob listing", ex);
                }
            }
        }

        if (result == null) {
            throw lastException;
        } else {
            return result;
        }
    }

    /**
     * Recursively deletes every blob under the directory, breaking any active
     * lease before deletion.
     */
    public static void deleteAllBlobs(@NotNull CloudBlobDirectory directory) throws URISyntaxException, StorageException, InterruptedException {
        for (ListBlobItem blobItem : directory.listBlobs()) {
            if (blobItem instanceof CloudBlob) {
                CloudBlob cloudBlob = (CloudBlob) blobItem;
                if (cloudBlob.getProperties().getLeaseStatus() == LeaseStatus.LOCKED) {
                    cloudBlob.breakLease(0);
                }
                cloudBlob.deleteIfExists();
            } else if (blobItem instanceof CloudBlobDirectory) {
                deleteAllBlobs((CloudBlobDirectory) blobItem);
            }
        }
    }

    /** Adapts an Oak {@link Buffer} as an {@link OutputStream} download target. */
    private static class ByteBufferOutputStream extends OutputStream {

        @NotNull
        private final Buffer buffer;

        public ByteBufferOutputStream(@NotNull Buffer buffer) {
            this.buffer = buffer;
        }

        @Override
        public void write(int b) {
            buffer.put((byte) b);
        }

        @Override
        public void write(@NotNull byte[] bytes, int offset, int length) {
            buffer.put(bytes, offset, length);
        }
    }

}
contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;

import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import static java.lang.Math.min;

/**
 * Reads a remote blob line by line, starting from the end of the file and
 * moving towards the beginning. The blob is fetched in fixed-size ranges
 * (default 16 KiB) so only a small window is held in memory at a time.
 * Instances are stateful and not thread-safe.
 */
public class ReverseFileReaderV8 {

    private static final int BUFFER_SIZE = 16 * 1024;

    /** Size of each downloaded range; never reassigned after construction. */
    private final int bufferSize;

    private final CloudBlob blob;

    /** Current window of blob bytes; {@code null} until the first read. */
    private byte[] buffer;

    /** Read position inside {@link #buffer}; -1 means "stopped on a '\n'". */
    private int bufferOffset;

    /** Offset in the blob of the first byte of the current window. */
    private int fileOffset;

    public ReverseFileReaderV8(CloudBlob blob) throws StorageException {
        this (blob, BUFFER_SIZE);
    }

    public ReverseFileReaderV8(CloudBlob blob, int bufferSize) throws StorageException {
        this.blob = blob;
        if (blob.exists()) {
            // Start past the last byte; reading proceeds backwards from here.
            this.fileOffset = (int) blob.getProperties().getLength();
        } else {
            this.fileOffset = 0;
        }
        this.bufferSize = bufferSize;
    }

    /**
     * Downloads the range of the blob immediately preceding the current window
     * into {@link #buffer}, shrinking the buffer when fewer bytes remain.
     */
    private void readBlock() throws IOException {
        if (buffer == null) {
            buffer = new byte[min(fileOffset, bufferSize)];
        } else if (fileOffset < buffer.length) {
            buffer = new byte[fileOffset];
        }

        if (buffer.length > 0) {
            fileOffset -= buffer.length;
            try {
                OperationContext opContext = new OperationContext();
                // "If-Match: *" makes the request conditional on the blob still
                // existing. NOTE(review): presumably a fail-fast guard against
                // the blob being deleted between range reads -- confirm.
                HashMap<String, String> userHeaders = new HashMap<>();
                userHeaders.put("If-Match", "*");
                opContext.setUserHeaders(userHeaders);
                blob.downloadRangeToByteArray(fileOffset, Long.valueOf(buffer.length), buffer, 0, null, null, opContext);
            } catch (StorageException e) {
                throw new IOException(e);
            }
        }
        bufferOffset = buffer.length;
    }

    /**
     * Scans backwards from {@link #bufferOffset} to the previous '\n' (or the
     * start of the buffer) and returns the text in between.
     * NOTE(review): decodes each fragment with the platform default charset; a
     * multi-byte character split across two blocks would decode incorrectly --
     * consider pinning UTF-8 if the journal is known to be UTF-8.
     */
    private String readUntilNewLine() {
        if (bufferOffset == -1) {
            return "";
        }
        int stop = bufferOffset;
        while (--bufferOffset >= 0) {
            if (buffer[bufferOffset] == '\n') {
                break;
            }
        }
        // bufferOffset points either the previous '\n' character or -1
        return new String(buffer, bufferOffset + 1, stop - bufferOffset - 1, Charset.defaultCharset());
    }

    /**
     * Returns the next line, moving from the end of the blob towards the
     * beginning, or {@code null} once the whole blob has been consumed.
     * Lines that span multiple downloaded blocks are stitched together.
     */
    public String readLine() throws IOException {
        if (bufferOffset == -1 && fileOffset == 0) {
            return null; // everything has been read
        }

        if (buffer == null) {
            readBlock();
        }

        List<String> result = new ArrayList<>(1);
        while (true) {
            result.add(readUntilNewLine());
            if (bufferOffset > -1) { // stopped on the '\n'
                break;
            }
            if (fileOffset == 0) { // reached the beginning of the file
                break;
            }
            readBlock();
        }
        // Fragments were collected back-to-front; restore reading order.
        Collections.reverse(result);
        return String.join("", result);
    }
}
com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.ListBlobItem; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.SegmentId; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; @@ -53,7 +52,11 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.*; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.Assert; import org.junit.contrib.java.lang.system.ProvideSystemProperty; import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; @@ -74,12 +77,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsNot.not; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.Assert.*; public class AzureArchiveManagerTest { @@ 
-89,17 +87,21 @@ public class AzureArchiveManagerTest { @Rule public TemporaryFolder folder = new TemporaryFolder(new File("target")); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; private AzurePersistence azurePersistence; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); azurePersistence.setWriteAccessController(writeAccessController); } @@ -109,7 +111,7 @@ public void setup() throws StorageException, InvalidKeyException, URISyntaxExcep .and(AzureRepositoryLock.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, "9"); @Test - public void testRecovery() throws StorageException, URISyntaxException, IOException { + public void testRecovery() throws BlobStorageException, IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -123,7 +125,7 @@ public void testRecovery() throws StorageException, URISyntaxException, IOExcept writer.flush(); writer.close(); - 
container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); LinkedHashMap recovered = new LinkedHashMap<>(); manager.recoverEntries("data00000a.tar", recovered); @@ -131,7 +133,7 @@ public void testRecovery() throws StorageException, URISyntaxException, IOExcept } @Test - public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxException, IOException { + public void testBackupWithRecoveredEntries() throws BlobStorageException, IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -145,7 +147,7 @@ public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxE writer.flush(); writer.close(); - container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); LinkedHashMap recovered = new LinkedHashMap<>(); manager.recoverEntries("data00000a.tar", recovered); @@ -153,17 +155,17 @@ public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxE manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); for (int i = 0; i <= 4; i++) { - assertTrue(container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertTrue(readBlobContainerClient.getBlobClient("oak/data00000a.tar/000" + i + "." + uuids.get(i)).exists()); } for (int i = 5; i <= 9; i++) { - assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertFalse(String.format("Segment %s.??? 
should have been deleted.", "oak/data00000a.tar/000" + i), readBlobContainerClient.getBlobClient("oak/data00000a.tar/000" + i + "." + uuids.get(i)).exists()); } } @Test - public void testUncleanStop() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStop() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -171,9 +173,10 @@ public void testUncleanStop() throws URISyntaxException, IOException, InvalidFil segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); fs.close(); - container.getBlockBlobReference("oak/data00000a.tar/closed").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete(); + + readBlobContainerClient.getBlobClient("oak/data00000a.tar/closed").delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.gph").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); @@ -183,8 +186,8 @@ public void testUncleanStop() throws URISyntaxException, IOException, InvalidFil @Test // see OAK-8566 - public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOException, 
InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStopWithEmptyArchive() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -201,9 +204,12 @@ public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOExcep fs.close(); // remove the segment 0000 from the second archive - ListBlobItem segment0000 = container.listBlobs("oak/data00001a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix("oak/data00001a.tar/0000."); + + BlobItem blobItem = readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().next(); + readBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); @@ -212,8 +218,8 @@ public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOExcep } @Test - public void testUncleanStopSegmentMissing() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStopSegmentMissing() throws 
IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -243,23 +249,31 @@ public void testUncleanStopSegmentMissing() throws URISyntaxException, IOExcepti fs.close(); // remove the segment 0002 from the second archive - ListBlobItem segment0002 = container.listBlobs("oak/data00001a.tar/0002.").iterator().next(); - ((CloudBlob) segment0002).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00001a.tar/0002."); + BlobItem blobItem = readBlobContainerClient.listBlobs(listOptions, null).stream().iterator().next(); + readBlobContainerClient.getBlobClient(blobItem.getName()).getBlockBlobClient().delete(); + readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); //recovered archive data00001a.tar should not contain segments 0002 and 0003 - assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar/0002."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar/0003."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); - 
assertTrue("Backup directory should have been created", container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak"); + assertTrue("Backup directory should have been created", readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); //backup has all segments but 0002 since it was deleted before recovery - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext()); - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0001."); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0002."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0003."); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); //verify content from recovered segments preserved assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1")); @@ -270,7 +284,7 @@ public void testUncleanStopSegmentMissing() throws URISyntaxException, IOExcepti } @Test - public void testExists() throws IOException, URISyntaxException { + public void testExists() throws IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -289,7 +303,7 @@ public void testExists() throws IOException, URISyntaxException { } @Test - public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException { + public void testArchiveExistsAfterFlush() throws IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new 
IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -301,7 +315,7 @@ public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException } @Test(expected = FileNotFoundException.class) - public void testSegmentDeletedAfterCreatingReader() throws IOException, URISyntaxException, StorageException { + public void testSegmentDeletedAfterCreatingReader() throws IOException, BlobStorageException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -315,8 +329,10 @@ public void testSegmentDeletedAfterCreatingReader() throws IOException, URISynta Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); assertNotNull(segment); - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar/0000."); + BlobItem segment0000 = readBlobContainerClient.listBlobs(listOptions, null).iterator().next(); + readBlobContainerClient.getBlobClient(segment0000.getName()).delete(); try { // FileNotFoundException should be thrown here @@ -328,9 +344,9 @@ public void testSegmentDeletedAfterCreatingReader() throws IOException, URISynta } @Test(expected = SegmentNotFoundException.class) - public void testMissngSegmentDetectedInFileStore() throws IOException, StorageException, URISyntaxException, InvalidFileStoreVersionException { + public void testMissingSegmentDetectedInFileStore() throws IOException, BlobStorageException, InvalidFileStoreVersionException { - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence azurePersistence = 
new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistence).build(); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); @@ -346,16 +362,18 @@ public void testMissngSegmentDetectedInFileStore() throws IOException, StorageEx Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); assertNotNull(segment); - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar/0000."); + BlobItem segment0000 = readBlobContainerClient.listBlobs(listOptions, null).iterator().next(); + readBlobContainerClient.getBlobClient(segment0000.getName()).delete(); // SegmentNotFoundException should be thrown here fileStore.readSegment(new SegmentId(fileStore, u.getMostSignificantBits(), u.getLeastSignificantBits())); } @Test - public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testReadOnlyRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = 
segmentNodeStore.getRoot().builder(); @@ -363,11 +381,14 @@ public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVe segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); rwFileStore.flush(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create read-only FS - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -379,14 +400,16 @@ public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVe roFileStore.close(); rwFileStore.close(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext()); } @Test - public void testCachingPersistenceTarRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCachingPersistenceTarRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -394,11 +417,14 @@ public void testCachingPersistenceTarRecovery() throws URISyntaxException, Inval segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); rwFileStore.flush(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create files store with split persistence - AzurePersistence azureSharedPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence azureSharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); CachingPersistence cachingPersistence = new 
CachingPersistence(createPersistenceCache(), azureSharedPersistence); File localFolder = folder.newFolder(); @@ -408,14 +434,16 @@ public void testCachingPersistenceTarRecovery() throws URISyntaxException, Inval // exception should not be thrown here FileStore splitPersistenceFileStore = FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); } @Test - public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCollectBlobReferencesForReadOnlyFileStore() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -424,10 +452,10 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxExce rwFileStore.flush(); // 
file with binary references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + assertFalse("brf file should not be present", readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -444,8 +472,8 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxExce } @Test - public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -454,10 +482,10 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISy rwFileStore.flush(); // file with binary 
references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + assertFalse("brf file should not be present", readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -478,21 +506,27 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISy @Test public void testWriteAfterLosingRepoLock() throws Exception { - CloudBlobDirectory oakDirectory = container.getDirectoryReference("oak"); - AzurePersistence rwPersistence = new AzurePersistence(oakDirectory); + BlobContainerClient oakDirectory = readBlobContainerClient.getBlobClient("oak").getContainerClient(); + BlobContainerClient writeOakDirectory = writeBlobContainerClient.getBlobClient("oak").getContainerClient(); + BlobContainerClient noRetryOakDirectory = noRetryBlobContainerClient.getBlobClient("oak").getContainerClient(); + AzurePersistence rwPersistence = new AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, ""); + + BlockBlobClient blob = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient leaseClient = new BlobLeaseClientBuilder().blobClient(blob).buildClient(); - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blob); + BlobLeaseClient 
blobLeaseMocked = Mockito.spy(leaseClient); Mockito .doCallRealMethod() - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + .when(blobLeaseMocked).renewLease(); AzurePersistence mockedRwPersistence = Mockito.spy(rwPersistence); WriteAccessController writeAccessController = new WriteAccessController(); - AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, () -> {}, writeAccessController); - AzureArchiveManager azureArchiveManager = new AzureArchiveManager(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); + AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> { + }, writeAccessController); + AzureArchiveManager azureArchiveManager = new AzureArchiveManager(oakDirectory, writeOakDirectory, "", new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); Mockito @@ -503,7 +537,7 @@ public void testWriteAfterLosingRepoLock() throws Exception { .doReturn(azureArchiveManager) .when(mockedRwPersistence).createArchiveManager(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any(), Mockito.any()); Mockito - .doReturn(new AzureJournalFile(oakDirectory, "journal.log", writeAccessController)) + .doReturn(new AzureJournalFile(oakDirectory, writeOakDirectory, "journal.log", writeAccessController)) .when(mockedRwPersistence).getJournalFile(); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build(); @@ -514,10 +548,11 @@ public void testWriteAfterLosingRepoLock() throws Exception { // simulate operation timeout when trying to renew lease Mockito.reset(blobMocked); - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + BlobStorageException storageException = + //new BlobStorageException("operation timeout", 
BlobErrorCode.OPERATION_TIMED_OUT, new TimeoutException()); + new BlobStorageException("operation timeout", null, new TimeoutException()); - Mockito.doThrow(storageException).when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doThrow(storageException).when(blobLeaseMocked).renewLease(); // wait till lease expires @@ -538,7 +573,7 @@ public void testWriteAfterLosingRepoLock() throws Exception { Thread.sleep(2000); // It should be possible to start another RW file store. - FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory)).build(); + FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, "")).build(); SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java index f83d2e7f6f6..dcd08ca3d2d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java @@ -18,10 +18,8 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.apache.jackrabbit.oak.segment.file.GcJournalTest; import 
org.junit.Before; @@ -29,24 +27,26 @@ import org.junit.Ignore; import org.junit.Test; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; public class AzureGCJournalTest extends GcJournalTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Override protected SegmentNodeStorePersistence getPersistence() throws Exception { - return new AzurePersistence(container.getDirectoryReference("oak")); + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); } @Test diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java new file mode 100644 index 00000000000..59edf558d5e --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.junit.Before; +import org.junit.Test; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.*; + +public class AzureHttpRequestLoggingPolicyTest { + + private AzureHttpRequestLoggingPolicy loggingPolicy; + private HttpPipelineCallContext mockContext; + private HttpPipelineNextPolicy mockNextPolicy; + private HttpResponse mockHttpResponse; + private RemoteStoreMonitor mockRemoteStoreMonitor; + + @Before + public void setup() { + loggingPolicy = new AzureHttpRequestLoggingPolicy(); + mockContext = mock(HttpPipelineCallContext.class); + mockNextPolicy = mock(HttpPipelineNextPolicy.class); + mockHttpResponse = mock(HttpResponse.class); + mockRemoteStoreMonitor = mock(RemoteStoreMonitor.class); + } + + + @Test + public void testRemoteStoreMonitorTracksMetrics() { + // Attach the remote store monitor + loggingPolicy.setRemoteStoreMonitor(mockRemoteStoreMonitor); + + // Setup mock behavior + HttpRequest mockHttpRequest = mock(HttpRequest.class); + 
when(mockContext.getHttpRequest()).thenReturn(mockHttpRequest); + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(200); + + // Run the process method + Mono result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify the result + StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + // Verify that the monitor recorded the metrics + verify(mockRemoteStoreMonitor, times(1)).requestDuration(anyLong(), eq(TimeUnit.NANOSECONDS)); + verify(mockRemoteStoreMonitor, times(1)).requestCount(); + verify(mockRemoteStoreMonitor, never()).requestError(); + } + + @Test + public void testErrorStatusCodeTriggersErrorCount() { + loggingPolicy.setRemoteStoreMonitor(mockRemoteStoreMonitor); + + // Setup mock behavior + HttpRequest mockHttpRequest = mock(HttpRequest.class); + when(mockContext.getHttpRequest()).thenReturn(mockHttpRequest); + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(500); // Error status code + + // Run the process method + Mono result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify the result + StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + // Verify that error count was recorded + verify(mockRemoteStoreMonitor, times(1)).requestDuration(anyLong(), eq(TimeUnit.NANOSECONDS)); + verify(mockRemoteStoreMonitor, times(1)).requestError(); + verify(mockRemoteStoreMonitor, never()).requestCount(); + } + + @Test + public void testNoRemoteStoreMonitor() { + // Setup: No remoteStoreMonitor is attached + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(200); + + // Run the process method + Mono result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify that the result is correct and that no interactions with the monitor occurred + 
StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + verifyNoInteractions(mockRemoteStoreMonitor); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java new file mode 100644 index 00000000000..d76554e3caa --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import org.apache.jackrabbit.guava.common.base.Stopwatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +public class AzureHttpRequestLoggingTestingPolicy implements HttpPipelinePolicy { + + private static final Logger log = LoggerFactory.getLogger(AzureHttpRequestLoggingTestingPolicy.class); + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + Stopwatch stopwatch = Stopwatch.createStarted(); + + log.info("HTTP Request: {} {}", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl()); + + return next.process().flatMap(httpResponse -> { + log.info("Status code is: {}", httpResponse.getStatusCode()); + log.info("Response time: {}ms", (stopwatch.elapsed(TimeUnit.NANOSECONDS)) / 1_000_000); + + return Mono.just(httpResponse); + }); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java index f5da9f15e6a..b54c5fdfeaa 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java @@ -16,13 +16,14 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.ListBlobItem; +import java.util.stream.Collectors; import java.util.stream.IntStream; + 
+import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; import org.apache.commons.lang3.time.StopWatch; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; @@ -32,8 +33,6 @@ import org.junit.Test; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.ArrayList; import java.util.List; @@ -48,20 +47,23 @@ public class AzureJournalFileTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + + private BlobContainerClient writeBlobContainerClient; private AzureJournalFile journal; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - journal = new AzureJournalFile(container.getDirectoryReference("journal"), "journal.log", writeAccessController, 50); + journal = new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, "journal.log", writeAccessController, 50); } @Test - public void testSplitJournalFiles() throws IOException, URISyntaxException, StorageException { + public void testSplitJournalFiles() throws IOException { assertFalse(journal.exists()); 
int index = 0; @@ -81,13 +83,11 @@ public void testSplitJournalFiles() throws IOException, URISyntaxException, Stor assertJournalEntriesCount(index); } - private int countJournalBlobs() throws URISyntaxException, StorageException { - List result = new ArrayList<>(); - for (ListBlobItem b : container.getDirectoryReference("journal").listBlobs("journal.log")) { - if (b instanceof CloudAppendBlob) { - result.add((CloudAppendBlob) b); - } - } + private int countJournalBlobs() { + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix("journal.log"); + + List result = readBlobContainerClient.listBlobs(listBlobsOptions, null).stream().collect(Collectors.toList()); return result.size(); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java index 2b2b0d35ce4..241f42f2dd7 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java @@ -18,10 +18,8 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; import org.junit.Before; import org.junit.ClassRule; @@ -40,16 +38,20 @@ public class AzureManifestFileTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private 
BlobContainerClient noRetryBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Test - public void testManifest() throws URISyntaxException, IOException { - ManifestFile manifestFile = new AzurePersistence(container.getDirectoryReference("oak")).getManifestFile(); + public void testManifest() throws IOException { + ManifestFile manifestFile = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak").getManifestFile(); assertFalse(manifestFile.exists()); Properties props = new Properties(); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java index 63a2da26881..cbc3d2cf7c1 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java @@ -18,11 +18,8 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.SegmentId; import 
org.apache.jackrabbit.oak.segment.SegmentNotFoundException; @@ -50,16 +47,20 @@ public class AzureReadSegmentTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Test(expected = SegmentNotFoundException.class) - public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testReadNonExistentSegmentRepositoryReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -71,8 +72,8 @@ public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxExce } @Test(expected = RepositoryNotReachableException.class) - public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistence p = new ReadFailingAzurePersistence(container.getDirectoryReference("oak")); + public 
void testReadExistentSegmentRepositoryNotReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { + AzurePersistence p = new ReadFailingAzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -87,18 +88,17 @@ public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxExce } static class ReadFailingAzurePersistence extends AzurePersistence { - public ReadFailingAzurePersistence(CloudBlobDirectory segmentStoreDirectory) { - super(segmentStoreDirectory); + public ReadFailingAzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix) { + super(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefix); } @Override public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, - FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { - return new AzureArchiveManager(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix, ioMonitor, fileStoreMonitor, writeAccessController) { @Override public SegmentArchiveReader open(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor) { + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor) { @Override public Buffer readSegment(long msb, long lsb) throws IOException { throw new RepositoryNotReachableException( @@ -109,12 +109,12 @@ 
public Buffer readSegment(long msb, long lsb) throws IOException { @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveWriter(archiveDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + return new AzureSegmentArchiveWriter(writeBlobContainerClient, rootPrefix, archiveName, ioMonitor, fileStoreMonitor, writeAccessController) { @Override public Buffer readSegment(long msb, long lsb) throws IOException { throw new RepositoryNotReachableException( - new RuntimeException("Cannot access Azure storage")); } + new RuntimeException("Cannot access Azure storage")); + } }; } }; diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java index 03c878c7515..077f344fad0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java @@ -18,12 +18,15 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.core.http.HttpHeaderName; +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.RequestConditions; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import 
com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; import org.junit.Before; @@ -42,6 +45,7 @@ import java.util.concurrent.TimeoutException; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.*; public class AzureRepositoryLockTest { @@ -53,11 +57,13 @@ public class AzureRepositoryLockTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient noRetryBlobContainerClient; + private BlobContainerClient readBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); } @Rule @@ -66,11 +72,13 @@ public void setup() throws StorageException, InvalidKeyException, URISyntaxExcep .and(AzureRepositoryLock.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_BLOCK); @Test - public void testFailingLock() throws URISyntaxException, IOException, StorageException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - new AzureRepositoryLock(blob, () -> {}, new WriteAccessController()).lock(); + public void testFailingLock() throws IOException, BlobStorageException { + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetrtBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new 
BlobLeaseClientBuilder().blobClient(noRetrtBlockBlobClient).buildClient(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); try { - new AzureRepositoryLock(blob, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); fail("The second lock should fail."); } catch (IOException e) { // it's fine @@ -78,12 +86,14 @@ public void testFailingLock() throws URISyntaxException, IOException, StorageExc } @Test - public void testWaitingLock() throws URISyntaxException, IOException, StorageException, InterruptedException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + public void testWaitingLock() throws BlobStorageException, InterruptedException, IOException { + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetrtBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetrtBlockBlobClient).buildClient(); Semaphore s = new Semaphore(0); new Thread(() -> { try { - RepositoryLock lock = new AzureRepositoryLock(blob, () -> {}, new WriteAccessController()).lock(); + RepositoryLock lock = new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); s.release(); Thread.sleep(1000); lock.unlock(); @@ -93,33 +103,36 @@ public void testWaitingLock() throws URISyntaxException, IOException, StorageExc }).start(); s.acquire(); - new AzureRepositoryLock(blob, () -> {}, new WriteAccessController(), 10).lock(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController(), 10).lock(); } @Test - public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageException, IOException, InterruptedException { - 
CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + public void testLeaseRefreshUnsuccessful() throws BlobStorageException, IOException, InterruptedException { + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetryBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); + BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient); // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + BlobStorageException storageException = + new BlobStorageException("operation timeout", null, new TimeoutException()); Mockito.doThrow(storageException) .doThrow(storageException) .doCallRealMethod() - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + .when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); - new AzureRepositoryLock(blobMocked, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new WriteAccessController()).lock(); // wait till lease expires Thread.sleep(16000); // reset the mock to default behaviour - Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); try { - new AzureRepositoryLock(blobMocked, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new WriteAccessController()).lock(); 
fail("The second lock should fail."); } catch (IOException e) { // it's fine @@ -128,23 +141,32 @@ public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageExc @Test public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception { + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetrtBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetrtBlockBlobClient).buildClient(); - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); + BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient); // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + HttpHeaders headers = new HttpHeaders(); + headers.add(HttpHeaderName.fromString("x-ms-error-code"), BlobErrorCode.OPERATION_TIMED_OUT.toString()); + + MockAzureHttpResponse mockAzureHttpResponse = new MockAzureHttpResponse(306, "operation timeout"); + mockAzureHttpResponse.setHeaders(headers); + + BlobStorageException storageException = + new BlobStorageException("operation timeout", mockAzureHttpResponse, new TimeoutException()); + Mockito .doCallRealMethod() .doThrow(storageException) - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + .when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) isNull(), Mockito.any(), Mockito.any()); WriteAccessController writeAccessController = new WriteAccessController(); - new AzureRepositoryLock(blobMocked, () -> {}, writeAccessController).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, 
writeAccessController).lock(); Thread thread = new Thread(() -> { @@ -166,6 +188,6 @@ public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception Thread.sleep(5000); assertTrue("after more than 9 seconds thread should be in a waiting state", thread.getState().equals(Thread.State.WAITING)); - Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index ff45bf67cd7..b2a12f039b4 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -18,8 +18,11 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.policy.RequestRetryOptions; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; @@ -27,21 +30,18 @@ import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; import org.jetbrains.annotations.NotNull; -import org.junit.Before; -import org.junit.Rule; 
-import org.junit.Test; +import org.junit.*; import org.mockserver.client.MockServerClient; import org.mockserver.junit.MockServerRule; import org.mockserver.matchers.Times; import org.mockserver.model.BinaryBody; import org.mockserver.model.HttpRequest; -import shaded_package.org.apache.http.client.utils.URIBuilder; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.util.UUID; +import static org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule.ACCOUNT_KEY; +import static org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule.ACCOUNT_NAME; import static org.junit.Assert.assertThrows; import static org.mockserver.model.HttpRequest.request; import static org.mockserver.model.HttpResponse.response; @@ -50,6 +50,10 @@ public class AzureSegmentArchiveWriterTest { public static final String BASE_PATH = "/devstoreaccount1/oak-test"; public static final int MAX_ATTEMPTS = 3; + private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts"; + private static final String TIMEOUT_EXECUTION = "segment.timeout.execution"; + private static final String RETRY_INTERVAL_MS = "azure.segment.archive.writer.retries.intervalMs"; + private static final String WRITE_RETRY_ATTEMPTS = "azure.segment.archive.writer.retries.max"; @Rule public MockServerRule mockServerRule = new MockServerRule(this); @@ -57,24 +61,32 @@ public class AzureSegmentArchiveWriterTest { @SuppressWarnings("unused") private MockServerClient mockServerClient; - private CloudBlobContainer container; + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); @Before public void setUp() throws Exception { - container = createCloudBlobContainer(); - - System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100"); - System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); + mockServerClient = new MockServerClient("localhost", mockServerRule.getPort()); + 
System.setProperty(RETRY_INTERVAL_MS, "100"); + System.setProperty(WRITE_RETRY_ATTEMPTS, Integer.toString(MAX_ATTEMPTS)); // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter - System.setProperty("segment.azure.retry.attempts", "0"); - System.setProperty("segment.timeout.execution", "1"); + System.setProperty(RETRY_ATTEMPTS, "1"); + System.setProperty(TIMEOUT_EXECUTION, "1"); + } + + @AfterClass + public static void setDown() { + // resetting the values for the properties set in setUp(). otherwise these will apply to all the tests that are executed after + System.clearProperty(RETRY_ATTEMPTS); + System.clearProperty(TIMEOUT_EXECUTION); + System.clearProperty(RETRY_INTERVAL_MS); + System.clearProperty(WRITE_RETRY_ATTEMPTS); } @Test public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest writeBinaryReferencesRequest = getWriteBinaryReferencesRequest(); // fail twice @@ -86,6 +98,9 @@ public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws E .when(writeBinaryReferencesRequest, Times.once()) .respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.writeBinaryReferences(new byte[10]); mockServerClient.verify(writeBinaryReferencesRequest, exactly(MAX_ATTEMPTS)); @@ -93,8 +108,7 @@ public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws E @Test public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest writeGraphRequest = getWriteGraphRequest(); // fail twice @@ -106,6 +120,9 @@ public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { .when(writeGraphRequest, Times.once()) 
.respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.writeGraph(new byte[10]); mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS)); @@ -113,8 +130,7 @@ public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { @Test public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest closeArchiveRequest = getCloseArchiveRequest(); // fail twice @@ -126,6 +142,9 @@ public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { .when(closeArchiveRequest, Times.once()) .respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.close(); mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); @@ -133,8 +152,7 @@ public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { @Test public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest closeArchiveRequest = getCloseArchiveRequest(); // always fail @@ -142,6 +160,8 @@ public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception .when(closeArchiveRequest, Times.unlimited()) .respond(response().withStatusCode(500)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); assertThrows(IOException.class, writer::close); @@ -150,7 +170,6 @@ public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception private void writeAndFlushSegment(SegmentArchiveWriter writer) throws IOException { - expectWriteRequests(); UUID u = UUID.randomUUID(); writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new 
byte[10], 0, 10, 0, 0, false); writer.flush(); @@ -167,10 +186,17 @@ private void expectWriteRequests() { } @NotNull - private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException { + private SegmentArchiveWriter createSegmentArchiveWriter() throws IOException { + createContainerMock(); + BlobContainerClient readBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOptionsDefault()); + BlobContainerClient writeBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations()); + BlobContainerClient noRetryBlobContainerClient = getCloudStorageAccount("oak-test", null); + writeBlobContainerClient.deleteIfExists(); + writeBlobContainerClient.createIfNotExists(); + WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak"));/**/ + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak");/**/ azurePersistence.setWriteAccessController(writeAccessController); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -180,44 +206,62 @@ private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxExcept private static HttpRequest getCloseArchiveRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/closed"); + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2Fclosed"); } private static HttpRequest getWriteBinaryReferencesRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.brf"); + .withPath(BASE_PATH + 
"/oak%2Fdata00000a.tar%2Fdata00000a.tar.brf"); } private static HttpRequest getWriteGraphRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.gph"); + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2Fdata00000a.tar.gph"); } private static HttpRequest getUploadSegmentMetadataRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2F.*") .withQueryStringParameter("comp", "metadata"); } private static HttpRequest getUploadSegmentDataRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2F.*") .withBody(new BinaryBody(new byte[10])); } - @NotNull - private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException { - URI uri = new URIBuilder() - .setScheme("http") - .setHost(mockServerClient.remoteAddress().getHostName()) - .setPort(mockServerClient.remoteAddress().getPort()) - .setPath(BASE_PATH) - .build(); + private void createContainerMock() { + mockServerClient + .when(request() + .withMethod("PUT") + .withPath(BASE_PATH)) + .respond(response().withStatusCode(201).withBody("Container created successfully")); + } + + public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) { + String blobEndpoint = "BlobEndpoint=http://localhost:" + mockServerRule.getPort() + "/devstoreaccount1"; + String accountName = "AccountName=" + ACCOUNT_NAME; + String accountKey = "AccountKey=" + ACCOUNT_KEY; + + AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy(); - return new CloudBlobContainer(uri); + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(azurite.getBlobEndpoint()) + .addPolicy(azureHttpRequestLoggingTestingPolicy) + .connectionString(("DefaultEndpointsProtocol=http;" + 
accountName + ";" + accountKey + ";" + blobEndpoint)); + + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); } + } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java index 8a9b35faed1..fb82fefd6a8 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java @@ -16,46 +16,36 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; -import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import java.io.IOException; -import java.net.URISyntaxException; -import java.time.Duration; -import java.time.Instant; -import java.util.Date; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.Set; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.sas.BlobSasPermission; +import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.apache.sling.testing.mock.osgi.junit.OsgiContext; import org.jetbrains.annotations.NotNull; import 
org.junit.Before; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.osgi.util.converter.Converters; +import java.util.stream.Stream; +import java.io.ByteArrayInputStream; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.util.HashMap; +import java.util.Set; +import java.util.stream.StreamSupport; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; - -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; import static java.util.stream.Collectors.toSet; -import static org.junit.Assert.assertEquals; +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.*; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.junit.Assume.assumeNotNull; @@ -68,23 +58,39 @@ public class AzureSegmentStoreServiceTest { @Rule public final OsgiContext context = new OsgiContext(); - private static final EnumSet READ_ONLY = EnumSet.of(READ, LIST); - private static final EnumSet READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); + private static BlobSasPermission READ_ONLY; + private static BlobSasPermission READ_WRITE; private static final Set BLOBS = Set.of("blob1", 
"blob2"); - private CloudBlobContainer container; - + private BlobContainerClient container; + + @BeforeClass + public static void setupTest(){ + READ_ONLY = new BlobSasPermission(); + READ_ONLY.setReadPermission(true); + READ_ONLY.setListPermission(true); + + READ_WRITE = new BlobSasPermission(); + READ_WRITE.setReadPermission(true); + READ_WRITE.setListPermission(true); + READ_WRITE.setCreatePermission(true); + READ_WRITE.setWritePermission(true); + READ_WRITE.setAddPermission(true); + System.setProperty("segment.azure.v12.enabled", "true"); + + } + @Before public void setup() throws Exception { - container = azurite.getContainer(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME); + container = azurite.getReadBlobContainerClient(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME); for (String blob : BLOBS) { - container.getBlockBlobReference(blob + ".txt").uploadText(blob); + container.getBlobClient(blob + ".txt").getBlockBlobClient().upload(new ByteArrayInputStream(blob.getBytes()), blob.length()); } } @Test public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { - String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null); + String sasToken = container.generateSas(policy(READ_ONLY), null); AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); @@ -97,7 +103,7 @@ public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { @Test public void connectWithSharedAccessSignatureURL_readWrite() throws Exception { - String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null); + String sasToken = container.generateSas(policy(READ_WRITE), null); AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); @@ 
-110,8 +116,7 @@ public void connectWithSharedAccessSignatureURL_readWrite() throws Exception { @Test public void connectWithSharedAccessSignatureURL_expired() throws Exception { - SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday()); - String sasToken = container.generateSharedAccessSignature(expiredPolicy, null); + String sasToken = container.generateSas(policy(READ_WRITE, -1), null); AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); @@ -178,48 +183,45 @@ public void deactivate() throws Exception { } @NotNull - private static SharedAccessBlobPolicy policy(EnumSet permissions, Instant expirationTime) { - SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy(); - sharedAccessBlobPolicy.setPermissions(permissions); - sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime)); - return sharedAccessBlobPolicy; + private static BlobServiceSasSignatureValues policy(BlobSasPermission permissions, long days) { + return new BlobServiceSasSignatureValues(OffsetDateTime.now().plusDays(days), permissions); } @NotNull - private static SharedAccessBlobPolicy policy(EnumSet permissions) { - return policy(permissions, Instant.now().plus(Duration.ofDays(7))); + private static BlobServiceSasSignatureValues policy(BlobSasPermission permissions) { + return policy(permissions, 7); } private static void assertReadAccessGranted(SegmentNodeStorePersistence persistence, Set expectedBlobs) throws Exception { - CloudBlobContainer container = getContainerFrom(persistence); + BlobContainerClient container = getContainerFrom(persistence); Set actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false) - .map(blob -> blob.getUri().getPath()) - .map(path -> path.substring(path.lastIndexOf('/') + 1)) - .filter(name -> name.equals("test.txt") || name.startsWith("blob")) - 
.collect(toSet()); + .map(BlobItem::getName) + .map(path -> path.substring(path.lastIndexOf('/') + 1)) + .filter(name -> name.equals("test.txt") || name.startsWith("blob")) + .collect(toSet()); Set expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet()); assertEquals(expectedBlobNames, actualBlobNames); Set actualBlobContent = actualBlobNames.stream() - .map(name -> { - try { - return container.getBlockBlobReference(name).downloadText(); - } catch (StorageException | IOException | URISyntaxException e) { - throw new RuntimeException("Error while reading blob " + name, e); - } - }) - .collect(toSet()); + .map(name -> { + try { + return container.getBlobClient(name).downloadContent().toString(); + } catch (BlobStorageException e) { + throw new RuntimeException("Error while reading blob " + name, e); + } + }) + .collect(toSet()); assertEquals(expectedBlobs, actualBlobContent); } private static void assertWriteAccessGranted(SegmentNodeStorePersistence persistence) throws Exception { getContainerFrom(persistence) - .getBlockBlobReference("test.txt").uploadText("test"); + .getBlobClient("test.txt").upload(new ByteArrayInputStream("test".getBytes())); } - private static CloudBlobContainer getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception { - return ((AzurePersistence) persistence).getSegmentstoreDirectory().getContainer(); + private static BlobContainerClient getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception { + return ((AzurePersistence) persistence).getReadBlobContainerClient(); } private static void assertWriteAccessNotGranted(SegmentNodeStorePersistence persistence) { @@ -258,9 +260,9 @@ private static Configuration getConfigurationWithAccessKey(String accessKey) { private static Configuration getConfigurationWithConfigurationURL(String accessKey) { String connectionString = "DefaultEndpointsProtocol=https;" - + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';' - + "AccountName=" + 
AzuriteDockerRule.ACCOUNT_NAME + ';' - + "AccountKey=" + accessKey + ';'; + + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';' + + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';' + + "AccountKey=" + accessKey + ';'; return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, null, connectionString, null, null, null); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java index 009ec430a71..9c4aad9aebb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java @@ -16,14 +16,12 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; import org.junit.Before; import org.junit.ClassRule; @@ -31,27 +29,32 @@ import org.junit.Test; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; public class AzureTarFileTest extends TarFileTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient 
readBlobContainerClient; + + private BlobContainerClient writeBlobContainerClient; + + private BlobContainerClient noRetryBlobContainerClient; @Before @Override public void setUp() throws IOException { try { - container = azurite.getContainer("oak-test"); - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistence.setWriteAccessController(writeAccessController); archiveManager = azurePersistence.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - } catch (StorageException | InvalidKeyException | URISyntaxException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java index 3be1367531d..afe76eeeee9 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java @@ -16,14 +16,12 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; 
+import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; -import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; import org.junit.Before; import org.junit.ClassRule; @@ -33,13 +31,18 @@ public class AzureTarFilesTest extends TarFilesTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before @Override public void setUp() throws Exception { - container = azurite.getContainer("oak-test"); - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistence.setWriteAccessController(writeAccessController); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java index c27e3a703a3..a3ee015f87b 100644 --- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java @@ -16,12 +16,10 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; import org.jetbrains.annotations.NotNull; @@ -35,11 +33,13 @@ public class AzureTarWriterTest extends TarWriterTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before public void setUp() throws Exception { - container = azurite.getContainer("oak-test"); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } @NotNull @@ -47,7 +47,7 @@ public void setUp() throws Exception { protected SegmentArchiveManager getSegmentArchiveManager() throws Exception { WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - AzureArchiveManager azureArchiveManager = new AzureArchiveManager(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController); + AzureArchiveManager azureArchiveManager = new 
AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, "oak", new IOMonitorAdapter(), monitor, writeAccessController); return azureArchiveManager; } @@ -56,10 +56,10 @@ protected SegmentArchiveManager getSegmentArchiveManager() throws Exception { protected SegmentArchiveManager getFailingSegmentArchiveManager() throws Exception { final WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - return new AzureArchiveManager(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController) { + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, "oak", new IOMonitorAdapter(), monitor, writeAccessController) { @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriter(getDirectory(archiveName), ioMonitor, monitor, writeAccessController) { + return new AzureSegmentArchiveWriter(writeBlobContainerClient, "oak", archiveName, ioMonitor, monitor, writeAccessController) { @Override public void writeGraph(@NotNull byte[] data) throws IOException { throw new IOException("test"); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java new file mode 100644 index 00000000000..187e85afb9a --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.common.policy.RequestRetryOptions; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import org.junit.Assume; +import org.junit.rules.ExternalResource; +import org.junit.runner.Description; +import org.junit.runners.model.MultipleFailureException; +import org.junit.runners.model.Statement; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.utility.DockerImageName; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class AzuriteDockerRule extends ExternalResource { + + private static final DockerImageName DOCKER_IMAGE_NAME = DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:3.31.0"); + public static final String ACCOUNT_KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="; + public static final String ACCOUNT_NAME = "devstoreaccount1"; + private static final AtomicReference STARTUP_EXCEPTION = new AtomicReference<>(); + + private GenericContainer azuriteContainer; + + @Override + protected void before() throws Throwable { + azuriteContainer = new GenericContainer<>(DOCKER_IMAGE_NAME) + .withExposedPorts(10000) + 
.withEnv(Map.of("executable", "blob")) + .withStartupTimeout(Duration.ofSeconds(30)); + + try { + azuriteContainer.start(); + } catch (IllegalStateException e) { + STARTUP_EXCEPTION.set(e); + throw e; + } + } + + @Override + protected void after() { + if (azuriteContainer != null) { + azuriteContainer.stop(); + } + } + + @Override + public Statement apply(Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + try { + before(); + } catch (IllegalStateException e) { + Assume.assumeNoException(STARTUP_EXCEPTION.get()); + throw e; + } + + List errors = new ArrayList(); + try { + base.evaluate(); + } catch (Throwable t) { + errors.add(t); + } finally { + try { + after(); + } catch (Throwable t) { + errors.add(t); + } + } + MultipleFailureException.assertEmpty(errors); + } + }; + } + + public String getBlobEndpoint() { + return "http://127.0.0.1:" + getMappedPort() + "/devstoreaccount1"; + } + + public BlobContainerClient getReadBlobContainerClient(String name) throws BlobStorageException { + BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOptionsDefault()); + cloud.deleteIfExists(); + cloud.create(); + return cloud; + } + + public BlobContainerClient getNoRetryBlobContainerClient(String name) throws BlobStorageException { + BlobContainerClient cloud = getCloudStorageAccount(name, null); + return cloud; + } + + public BlobContainerClient getWriteBlobContainerClient(String name) throws BlobStorageException { + BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations()); + return cloud; + } + + public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) { + String blobEndpoint = "BlobEndpoint=" + getBlobEndpoint(); + String accountName = "AccountName=" + ACCOUNT_NAME; + String accountKey = "AccountKey=" + ACCOUNT_KEY; + + 
AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy(); + + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(getBlobEndpoint()) + .addPolicy(azureHttpRequestLoggingTestingPolicy) + .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)); + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + + public int getMappedPort() { + return azuriteContainer.getMappedPort(10000); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java new file mode 100644 index 00000000000..f1b1ba563b2 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.HttpResponse; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +public class MockAzureHttpResponse extends HttpResponse { + + private final int statusCode; + private final String body; + private HttpHeaders headers; + + public MockAzureHttpResponse(int statusCode, String body) { + super(null); + this.statusCode = statusCode; + this.body = body; + } + + @Override + public int getStatusCode() { + return statusCode; + } + + @Override + public String getHeaderValue(String name) { + return null; // Simplified for this example + } + + @Override + public HttpHeaders getHeaders() { + return this.headers; + } + + public void setHeaders(HttpHeaders headers) { + this.headers = headers; + } + + + @Override + public Flux getBody() { + return null; + } + + @Override + public Mono getBodyAsByteArray() { + return Mono.just(body.getBytes()); + } + + @Override + public Mono getBodyAsString() { + return null; + } + + @Override + public Mono getBodyAsString(Charset charset) { + return null; + } +} + diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java index 2671b1f495b..deedd7b1758 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java @@ -19,22 +19,22 @@ package org.apache.jackrabbit.oak.segment.azure.fixture; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobStorageException; +import 
com.azure.storage.common.policy.RequestRetryOptions; import org.apache.jackrabbit.guava.common.io.Files; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.spi.state.NodeStore; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.HashMap; import java.util.Map; import java.util.UUID; @@ -49,26 +49,29 @@ public class SegmentAzureFixture extends NodeStoreFixture { private Map fileStoreMap = new HashMap<>(); - private Map containerMap = new HashMap<>(); + private Map containerMap = new HashMap<>(); @Override public NodeStore createNodeStore() { AzurePersistence persistence; - CloudBlobContainer container; + BlobContainerClient writeBlobContainerClient; try { - CloudStorageAccount cloud = CloudStorageAccount.parse(AZURE_CONNECTION_STRING); - - while (true) { - String containerName = AZURE_CONTAINER + "-" + UUID.randomUUID().toString(); - container = cloud.createCloudBlobClient().getContainerReference(containerName); - if (!container.exists()) { - container.create(); - break; - } - } - CloudBlobDirectory directory = container.getDirectoryReference(AZURE_ROOT_PATH); - persistence = new AzurePersistence(directory); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { + String containerName = AZURE_CONTAINER + 
"-" + UUID.randomUUID().toString(); + + String endpoint = String.format("https://%s.blob.core.windows.net", containerName); + + RequestRetryOptions retryOptions = AzureRequestOptions.getRetryOptionsDefault(); + BlobContainerClient reaBlobContainerClient = getBlobContainerClient(retryOptions, endpoint, containerName); + + RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); + writeBlobContainerClient = getBlobContainerClient(writeRetryOptions, endpoint, containerName); + + writeBlobContainerClient.createIfNotExists(); + + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(null, endpoint, containerName); + + persistence = new AzurePersistence(reaBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, AZURE_ROOT_PATH); + } catch (BlobStorageException e) { throw new RuntimeException(e); } @@ -76,7 +79,7 @@ public NodeStore createNodeStore() { FileStore fileStore = FileStoreBuilder.fileStoreBuilder(Files.createTempDir()).withCustomPersistence(persistence).build(); NodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build(); fileStoreMap.put(nodeStore, fileStore); - containerMap.put(nodeStore, container); + containerMap.put(nodeStore, writeBlobContainerClient); return nodeStore; } catch (IOException | InvalidFileStoreVersionException e) { throw new RuntimeException(e); @@ -89,11 +92,11 @@ public void dispose(NodeStore nodeStore) { fs.close(); } try { - CloudBlobContainer container = containerMap.remove(nodeStore); + BlobContainerClient container = containerMap.remove(nodeStore); if (container != null) { container.deleteIfExists(); } - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new RuntimeException(e); } } @@ -102,4 +105,17 @@ public void dispose(NodeStore nodeStore) { public String toString() { return "SegmentAzure"; } + + private BlobContainerClient getBlobContainerClient(RequestRetryOptions retryOptions, String endpoint, String 
containerName) { + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(endpoint) + .connectionString(AZURE_CONNECTION_STRING); + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java new file mode 100644 index 00000000000..deff5477aab --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.jackrabbit.oak.segment.azure.fixture; + +import org.apache.jackrabbit.guava.common.io.Files; +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.spi.state.NodeStore; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +public class SegmentAzureFixtureV8 extends NodeStoreFixture { + + private static final String AZURE_CONNECTION_STRING = System.getProperty("oak.segment.azure.connection", "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"); + + private static final String AZURE_CONTAINER = System.getProperty("oak.segment.azure.container", "oak"); + + private static final String AZURE_ROOT_PATH = System.getProperty("oak.segment.azure.rootPath", "/oak"); + + private Map fileStoreMap = new HashMap<>(); + + private Map containerMap = new HashMap<>(); + + @Override + public NodeStore createNodeStore() { + AzurePersistenceV8 persistence; + CloudBlobContainer container; + try { + CloudStorageAccount cloud = CloudStorageAccount.parse(AZURE_CONNECTION_STRING); + + while (true) { + String containerName = AZURE_CONTAINER + "-" + 
UUID.randomUUID().toString(); + container = cloud.createCloudBlobClient().getContainerReference(containerName); + if (!container.exists()) { + container.create(); + break; + } + } + CloudBlobDirectory directory = container.getDirectoryReference(AZURE_ROOT_PATH); + persistence = new AzurePersistenceV8(directory); + } catch (StorageException | URISyntaxException | InvalidKeyException e) { + throw new RuntimeException(e); + } + + try { + FileStore fileStore = FileStoreBuilder.fileStoreBuilder(Files.createTempDir()).withCustomPersistence(persistence).build(); + NodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build(); + fileStoreMap.put(nodeStore, fileStore); + containerMap.put(nodeStore, container); + return nodeStore; + } catch (IOException | InvalidFileStoreVersionException e) { + throw new RuntimeException(e); + } + } + + public void dispose(NodeStore nodeStore) { + FileStore fs = fileStoreMap.remove(nodeStore); + if (fs != null) { + fs.close(); + } + try { + CloudBlobContainer container = containerMap.remove(nodeStore); + if (container != null) { + container.deleteIfExists(); + } + } catch (StorageException e) { + throw new RuntimeException(e); + } + } + + @Override + public String toString() { + return "SegmentAzure"; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java index dcd59155ba4..5a72b29dceb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java @@ -16,18 +16,18 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import 
com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; +import org.apache.jackrabbit.oak.segment.azure.AzureJournalFile; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.JournalReader; import org.apache.jackrabbit.oak.segment.file.JournalReaderTest; -import org.apache.jackrabbit.oak.segment.azure.AzureJournalFile; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.junit.Before; import org.junit.ClassRule; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URISyntaxException; import java.security.InvalidKeyException; @@ -37,20 +37,25 @@ public class AzureJournalReaderTest extends JournalReaderTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } protected JournalReader createJournalReader(String s) throws IOException { try { - CloudAppendBlob blob = container.getAppendBlobReference("journal/journal.log.001"); - blob.createOrReplace(); - blob.appendText(s); - return new JournalReader(new AzureJournalFile(container.getDirectoryReference("journal"), "journal.log", new WriteAccessController())); - } catch (StorageException | 
URISyntaxException e) { + AppendBlobClient blob = writeBlobContainerClient.getBlobClient("journal/journal.log.001").getAppendBlobClient(); + blob.createIfNotExists(); + if (!s.equals("")){ + blob.appendBlock(new ByteArrayInputStream(s.getBytes()), s.length()); + } + + return new JournalReader(new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, "journal", new WriteAccessController())); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java index 099a0d23dd0..03c635d0551 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java @@ -16,36 +16,39 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.junit.Before; import org.junit.ClassRule; import java.io.IOException; -import java.net.URISyntaxException; public class AzureTarRevisionsTest extends TarRevisionsTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); 
- private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before public void setup() throws Exception { - container = azurite.getContainer("oak-test"); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); super.setup(); } @Override protected SegmentNodeStorePersistence getPersistence() throws IOException { try { - return new AzurePersistence(container.getDirectoryReference("oak")); - } catch (URISyntaxException e) { + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java index 6aa742aaf6e..5d347b6f21b 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java @@ -16,20 +16,18 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; 
import org.apache.jackrabbit.oak.segment.azure.ReverseFileReader; import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; +import java.io.ByteArrayInputStream; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -40,50 +38,51 @@ public class ReverseFileReaderTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient container; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - getBlob().createOrReplace(); + public void setup() throws BlobStorageException { + container = azurite.getReadBlobContainerClient("oak-test"); + container.getBlobClient("test-blob").getAppendBlobClient().createIfNotExists(); } - private CloudAppendBlob getBlob() throws URISyntaxException, StorageException { - return container.getAppendBlobReference("test-blob"); + private BlobItem getBlob() throws BlobStorageException { + return container.listBlobs().stream().filter(blobItem -> blobItem.getName().equals("test-blob")).findFirst().get(); } @Test - public void testReverseReader() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 1024, 80); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 256); + public void testReverseReader() throws IOException, BlobStorageException { + List entries = createFile(1024, 80); + ReverseFileReader reader = new ReverseFileReader(container, getBlob()); assertEquals(entries, reader); } @Test - public void testEmptyFile() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 0, 80); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 256); + public void testEmptyFile() throws IOException, 
BlobStorageException { + List entries = createFile(0, 80); + ReverseFileReader reader = new ReverseFileReader(container, getBlob(), 256); assertEquals(entries, reader); } @Test - public void test1ByteBlock() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 10, 16); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 1); + public void test1ByteBlock() throws IOException, BlobStorageException { + List entries = createFile(10, 16); + ReverseFileReader reader = new ReverseFileReader(container, getBlob(), 1); assertEquals(entries, reader); } - private List createFile(int lines, int maxLineLength) throws IOException, URISyntaxException, StorageException { + private List createFile(int lines, int maxLineLength) throws IOException, BlobStorageException { Random random = new Random(); List entries = new ArrayList<>(); - CloudAppendBlob blob = getBlob(); + BlobItem blob = getBlob(); for (int i = 0; i < lines; i++) { int entrySize = random.nextInt(maxLineLength) + 1; String entry = randomString(entrySize); try { - blob.appendText(entry + '\n'); - } catch (StorageException e) { + String text = entry + '\n'; + container.getBlobClient(blob.getName()).getAppendBlobClient().appendBlock(new ByteArrayInputStream(text.getBytes()), text.length()); + } catch (BlobStorageException e) { throw new IOException(e); } entries.add(entry); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java new file mode 100644 index 00000000000..e3ef6bb28bd --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.file.JournalReader; +import org.apache.jackrabbit.oak.segment.file.JournalReaderTest; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureJournalFileV8; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureJournalReaderV8Test extends JournalReaderTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + protected JournalReader createJournalReader(String s) throws IOException { + try { + CloudAppendBlob blob = container.getAppendBlobReference("journal/journal.log.001"); + 
blob.createOrReplace(); + blob.appendText(s); + return new JournalReader(new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", new WriteAccessController())); + } catch (StorageException | URISyntaxException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java new file mode 100644 index 00000000000..f304cfb0dfa --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.net.URISyntaxException; + +public class AzureTarRevisionsV8Test extends TarRevisionsTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws Exception { + container = azurite.getContainer("oak-test"); + super.setup(); + } + + @Override + protected SegmentNodeStorePersistence getPersistence() throws IOException { + try { + return new AzurePersistenceV8(container.getDirectoryReference("oak")); + } catch (URISyntaxException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java new file mode 100644 index 00000000000..10dd3d05a72 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.azure.v8.ReverseFileReaderV8; +import org.junit.Assert; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; + +public class ReverseFileReaderV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + getBlob().createOrReplace(); + } + + private CloudAppendBlob getBlob() throws URISyntaxException, StorageException { + return container.getAppendBlobReference("test-blob"); + } + + @Test + public void testReverseReader() throws IOException, URISyntaxException, StorageException { + List entries = createFile( 1024, 80); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); + assertEquals(entries, reader); + } + + @Test + public void testEmptyFile() throws IOException, URISyntaxException, StorageException { + List 
entries = createFile( 0, 80); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); + assertEquals(entries, reader); + } + + @Test + public void test1ByteBlock() throws IOException, URISyntaxException, StorageException { + List entries = createFile( 10, 16); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 1); + assertEquals(entries, reader); + } + + + private List createFile(int lines, int maxLineLength) throws IOException, URISyntaxException, StorageException { + Random random = new Random(); + List entries = new ArrayList<>(); + CloudAppendBlob blob = getBlob(); + for (int i = 0; i < lines; i++) { + int entrySize = random.nextInt(maxLineLength) + 1; + String entry = randomString(entrySize); + try { + blob.appendText(entry + '\n'); + } catch (StorageException e) { + throw new IOException(e); + } + entries.add(entry); + } + + entries.add(""); + Collections.reverse(entries); + return entries; + } + + private static void assertEquals(List entries, ReverseFileReaderV8 reader) throws IOException { + int i = entries.size(); + for (String e : entries) { + Assert.assertEquals("line " + (--i), e, reader.readLine()); + } + Assert.assertNull(reader.readLine()); + } + + private static String randomString(int entrySize) { + Random r = new Random(); + + StringBuilder result = new StringBuilder(); + for (int i = 0; i < entrySize; i++) { + result.append((char) ('a' + r.nextInt('z' - 'a'))); + } + + return result.toString(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java index fc08d30f13d..7d7ada9e3b7 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java +++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java @@ -19,18 +19,17 @@ package org.apache.jackrabbit.oak.segment.azure.tool; import com.microsoft.azure.storage.blob.CloudBlobDirectory; - -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.junit.Test; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assume.assumeNotNull; public class SegmentCopyAzureServicePrincipalToTarTest extends SegmentCopyTestBase { @@ -55,10 +54,10 @@ protected SegmentNodeStorePersistence getSrcPersistence() { String accountName = ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME); String path = String.format(SEGMENT_STORE_PATH_FORMAT, accountName, CONTAINER_NAME, DIR); CloudBlobDirectory cloudBlobDirectory; - try (AzureStorageCredentialManager 
azureStorageCredentialManager = new AzureStorageCredentialManager()) { - cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); } - return new AzurePersistence(cloudBlobDirectory); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java index a5453104187..05e7d32b9ec 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java @@ -49,7 +49,7 @@ import org.apache.jackrabbit.oak.segment.SegmentCache; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import org.apache.jackrabbit.oak.segment.azure.tool.SegmentCopy; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; @@ -235,7 +235,7 @@ protected SegmentNodeStorePersistence getTarPersistence() { } protected SegmentNodeStorePersistence getAzurePersistence() throws Exception { - return new AzurePersistence(azurite.getContainer(AZURE_CONTAINER).getDirectoryReference(AZURE_DIRECTORY)); + return new AzurePersistenceV8(azurite.getContainer(AZURE_CONTAINER).getDirectoryReference(AZURE_DIRECTORY)); } protected String getTarPersistencePathOrUri() { diff 
--git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java index 4ce192c794e..4fbb51555e2 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java @@ -37,8 +37,8 @@ import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.jetbrains.annotations.NotNull; import org.junit.After; @@ -48,11 +48,11 @@ import org.mockito.MockedStatic; import org.slf4j.LoggerFactory; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import 
static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -76,16 +76,16 @@ public class ToolUtilsTest { public static final String AZURE_SECRET_KEY_WARNING = "AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables empty or missing. Switching to authentication with AZURE_SECRET_KEY."; private final TestEnvironment environment = new TestEnvironment(); - private AzureStorageCredentialManager azureStorageCredentialManager; + private AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; @Before public void init() { - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @After public void clear() { - this.azureStorageCredentialManager.close(); + this.azureStorageCredentialManagerV8.close(); } @Test @@ -96,7 +96,7 @@ public void createCloudBlobDirectoryWithAccessKey() { StorageCredentialsAccountAndKey credentials = expectCredentials( StorageCredentialsAccountAndKey.class, - () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager), + () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8), DEFAULT_CONTAINER_URL ); @@ -112,7 +112,7 @@ public void createCloudBlobDirectoryWithAccessKey() { public void createCloudBlobDirectoryFailsWhenAccessKeyNotPresent() { environment.setVariable(AZURE_SECRET_KEY, null); assertThrows(IllegalArgumentException.class, () -> - ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager) + ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8) ); } @@ -120,7 +120,7 @@ public void createCloudBlobDirectoryFailsWhenAccessKeyNotPresent() { 
public void createCloudBlobDirectoryFailsWhenAccessKeyIsInvalid() { environment.setVariable(AZURE_SECRET_KEY, "invalid"); assertThrows(IllegalArgumentException.class, () -> - ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager) + ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8) ); } @@ -130,7 +130,7 @@ public void createCloudBlobDirectoryWithSasUri() { StorageCredentialsSharedAccessSignature credentials = expectCredentials( StorageCredentialsSharedAccessSignature.class, - () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH + '?' + sasToken, azureStorageCredentialManager), + () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH + '?' + sasToken, azureStorageCredentialManagerV8), DEFAULT_CONTAINER_URL ); @@ -149,17 +149,17 @@ public void createCloudBlobDirectoryWithServicePrincipal() throws URISyntaxExcep String containerName = "oak"; String segmentStorePath = String.format(SEGMENT_STORE_PATH_FORMAT, accountName, containerName, DEFAULT_REPO_DIR); - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(segmentStorePath, ENVIRONMENT, azureStorageCredentialManager); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(segmentStorePath, ENVIRONMENT, azureStorageCredentialManagerV8); assertNotNull(cloudBlobDirectory); assertEquals(containerName, cloudBlobDirectory.getContainer().getName()); } private static T expectCredentials(Class clazz, Runnable body, String containerUrl) { ArgumentCaptor credentialsCaptor = ArgumentCaptor.forClass(clazz); - try (MockedStatic mockedAzureUtilities = mockStatic(AzureUtilities.class)) { + try (MockedStatic mockedAzureUtilities = mockStatic(AzureUtilitiesV8.class)) { body.run(); - mockedAzureUtilities.verify(() -> AzureUtilities.cloudBlobDirectoryFrom( + mockedAzureUtilities.verify(() -> AzureUtilitiesV8.cloudBlobDirectoryFrom( 
credentialsCaptor.capture(), eq(containerUrl), eq(DEFAULT_REPO_DIR) diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java similarity index 63% rename from oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java rename to oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java index 7bbfd391a19..0f9a825da8a 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java @@ -27,7 +27,7 @@ import static org.junit.Assert.assertEquals; -public class AzureRequestOptionsTest { +public class AzureRequestOptionsV8Test { private BlobRequestOptions blobRequestOptions; @@ -38,41 +38,41 @@ public void setUp() { @Test public void testApplyDefaultRequestOptions() { - AzureRequestOptions.applyDefaultRequestOptions(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); + AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); } @Test public void testApplyDefaultRequestOptionsWithCustomTimeouts() { - 
System.setProperty(AzureRequestOptions.TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptions.TIMEOUT_INTERVAL_PROP, "5"); + System.setProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP, "10"); + System.setProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP, "5"); - AzureRequestOptions.applyDefaultRequestOptions(blobRequestOptions); + AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); - System.clearProperty(AzureRequestOptions.TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptions.TIMEOUT_INTERVAL_PROP); + System.clearProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP); + System.clearProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP); } @Test public void testOptimiseForWriteOperations() { - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptions.optimiseForWriteOperations(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); + BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); } @Test public void testOptimiseForWriteOperationsWithCustomTimeouts() { - 
System.setProperty(AzureRequestOptions.WRITE_TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptions.WRITE_TIMEOUT_INTERVAL_PROP, "5"); + System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP, "10"); + System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP, "5"); - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptions.optimiseForWriteOperations(blobRequestOptions); + BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); - System.clearProperty(AzureRequestOptions.WRITE_TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptions.WRITE_TIMEOUT_INTERVAL_PROP); + System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP); + System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java new file mode 100644 index 00000000000..4fb086d05f4 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java @@ -0,0 +1,587 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageErrorCodeStrings; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.ListBlobItem; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.api.PropertyState; +import org.apache.jackrabbit.oak.api.Type; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.SegmentId; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import 
org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.AbstractPersistentCache; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.CachingPersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.PersistentCache; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.*; +import org.junit.contrib.java.lang.system.ProvideSystemProperty; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static org.apache.jackrabbit.guava.common.collect.Lists.newArrayList; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsNot.not; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static 
org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class AzureArchiveManagerV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private CloudBlobContainer container; + + private AzurePersistenceV8 azurePersistenceV8; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + azurePersistenceV8.setWriteAccessController(writeAccessController); + } + + @Rule + public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, "15") + .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, "3") + .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, "9"); + + @Test + public void testRecovery() throws StorageException, URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + container.getBlockBlobReference("oak/data00000a.tar/0005." 
+ uuids.get(5).toString()).delete(); + + LinkedHashMap recovered = new LinkedHashMap<>(); + manager.recoverEntries("data00000a.tar", recovered); + assertEquals(uuids.subList(0, 5), newArrayList(recovered.keySet())); + } + + @Test + public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + + LinkedHashMap recovered = new LinkedHashMap<>(); + manager.recoverEntries("data00000a.tar", recovered); + + manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); + + for (int i = 0; i <= 4; i++) { + assertTrue(container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + } + + for (int i = 5; i <= 9; i++) { + assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." 
+ uuids.get(i)).exists()); + } + } + + @Test + public void testUncleanStop() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + container.getBlockBlobReference("oak/data00000a.tar/closed").delete(); + container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete(); + container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + fs.close(); + } + + @Test + // see OAK-8566 + public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + // make sure there are 2 archives + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build(); + builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo2", "bar2"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + // remove the segment 0000 from the second archive + ListBlobItem segment0000 = container.listBlobs("oak/data00001a.tar/0000.").iterator().next(); + ((CloudBlob) segment0000).delete(); + container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + fs.close(); + } + + @Test + public void testUncleanStopSegmentMissing() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + // make sure there are 2 archives + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo0", "bar0"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0001 + builder.setProperty("foo1", "bar1"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0002 + builder.setProperty("foo2", "bar2"); + segmentNodeStore.merge(builder, 
EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0003 + builder.setProperty("foo3", "bar3"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + fs.close(); + + // remove the segment 0002 from the second archive + ListBlobItem segment0002 = container.listBlobs("oak/data00001a.tar/0002.").iterator().next(); + ((CloudBlob) segment0002).delete(); + container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + + //recovered archive data00001a.tar should not contain segments 0002 and 0003 + assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext()); + assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext()); + + assertTrue("Backup directory should have been created", container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext()); + //backup has all segments but 0002 since it was deleted before recovery + assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext()); + assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext()); + assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext()); + + //verify content from recovered segments preserved + assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1")); + //content from deleted segments not preserved + assertNull(segmentNodeStore.getRoot().getString("foo2")); + assertNull(segmentNodeStore.getRoot().getString("foo3")); + fs.close(); + } + + @Test + public void testExists() throws IOException, URISyntaxException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + 
SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + Assert.assertTrue(manager.exists("data00000a.tar")); + Assert.assertFalse(manager.exists("data00001a.tar")); + } + + @Test + public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + Assert.assertTrue(manager.exists("data00000a.tar")); + } + + @Test(expected = FileNotFoundException.class) + public void testSegmentDeletedAfterCreatingReader() throws IOException, URISyntaxException, StorageException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + + SegmentArchiveReader reader = manager.open("data00000a.tar"); + Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + assertNotNull(segment); + + ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); 
+ ((CloudBlob) segment0000).delete(); + + try { + // FileNotFoundException should be thrown here + reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + fail(); + } catch (RepositoryNotReachableException e) { + fail(); + } + } + + @Test(expected = SegmentNotFoundException.class) + public void testMissngSegmentDetectedInFileStore() throws IOException, StorageException, URISyntaxException, InvalidFileStoreVersionException { + + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistenceV8).build(); + + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + //Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + + SegmentArchiveReader reader = manager.open("data00000a.tar"); + Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + assertNotNull(segment); + + ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); + ((CloudBlob) segment0000).delete(); + + // SegmentNotFoundException should be thrown here + fileStore.readSegment(new SegmentId(fileStore, u.getMostSignificantBits(), u.getLeastSignificantBits())); + } + + @Test + public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + + // create read-only FS + AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + roFileStore.close(); + rwFileStore.close(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + } + + @Test + public void testCachingPersistenceTarRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = 
segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + + // create a file store with split persistence + AzurePersistenceV8 azureSharedPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + + CachingPersistence cachingPersistence = new CachingPersistence(createPersistenceCache(), azureSharedPersistence); + File localFolder = folder.newFolder(); + SegmentNodeStorePersistence localPersistence = new TarPersistence(localFolder); + SegmentNodeStorePersistence splitPersistence = new SplitPersistence(cachingPersistence, localPersistence); + + // exception should not be thrown here + FileStore splitPersistenceFileStore = FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + // after creating a file store with split persistence, the recovery procedure should not be started since there is another running Oak process + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + } + + @Test + public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + 
builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + // file with binary references is not created yet + assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + + // create read-only FS, while the rw FS is still open + AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + assertDoesNotThrow(() -> roFileStore.collectBlobReferences(s -> { + })); + } + } + } + + @Test + public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + // file with binary references is not created yet + assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + + // create read-only FS, while the rw FS is still open + AzurePersistenceV8 roPersistence = 
new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + HashSet references = new HashSet<>(); + assertDoesNotThrow(() -> + roFileStore.collectBlobReferences(references::add)); + + assertTrue("No references should have been collected since reference file has not been created", references.isEmpty()); + } + } + } + + @Test + public void testWriteAfterLosingRepoLock() throws Exception { + CloudBlobDirectory oakDirectory = container.getDirectoryReference("oak"); + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(oakDirectory); + + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + + CloudBlockBlob blobMocked = Mockito.spy(blob); + + Mockito + .doCallRealMethod() + .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + AzurePersistenceV8 mockedRwPersistence = Mockito.spy(rwPersistence); + WriteAccessController writeAccessController = new WriteAccessController(); + AzureRepositoryLockV8 azureRepositoryLockV8 = new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController); + AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); + + + Mockito + .doAnswer(invocation -> azureRepositoryLockV8.lock()) + .when(mockedRwPersistence).lockRepository(); + + Mockito + .doReturn(azureArchiveManagerV8) + .when(mockedRwPersistence).createArchiveManager(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any(), Mockito.any()); + Mockito + .doReturn(new AzureJournalFileV8(oakDirectory, "journal.log", 
writeAccessController)) + .when(mockedRwPersistence).getJournalFile(); + + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + + + // simulate operation timeout when trying to renew lease + Mockito.reset(blobMocked); + + StorageException storageException = + new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + + Mockito.doThrow(storageException).when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + + // wait till lease expires + Thread.sleep(17000); + + // try updating repository + Thread thread = new Thread(() -> { + try { + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + } catch (Exception e) { + fail("No Exception expected, but got: " + e.getMessage()); + } + }); + thread.start(); + + Thread.sleep(2000); + + // It should be possible to start another RW file store. 
+ FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistenceV8(oakDirectory)).build(); + SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); + NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); + + //repository hasn't been updated + assertNull(builder2.getProperty("foo")); + + rwFileStore2.close(); + } + + private PersistentCache createPersistenceCache() { + return new AbstractPersistentCache() { + @Override + protected Buffer readSegmentInternal(long msb, long lsb) { + return null; + } + + @Override + public boolean containsSegment(long msb, long lsb) { + return false; + } + + @Override + public void writeSegment(long msb, long lsb, Buffer buffer) { + + } + + @Override + public void cleanUp() { + + } + }; + } + + private static void assertDoesNotThrow(Executable executable) { + try { + executable.execute(); + } catch (Exception e) { + fail("No Exception expected, but got: " + e.getMessage()); + } + } + + interface Executable { + void execute() throws Exception; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java new file mode 100644 index 00000000000..f431ea194b3 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.file.GcJournalTest; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureGCJournalV8Test extends GcJournalTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + @Override + protected SegmentNodeStorePersistence getPersistence() throws Exception { + return new AzurePersistenceV8(container.getDirectoryReference("oak")); + } + + @Test + @Ignore + @Override + public void testReadOak16GCLog() throws Exception { + super.testReadOak16GCLog(); + } + + @Test + @Ignore + @Override + public void testUpdateOak16GCLog() throws Exception { + super.testUpdateOak16GCLog(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java similarity index 94% rename from oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java rename to oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java index 415c1193401..6344b390bb3 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.jackrabbit.oak.segment.azure; +package org.apache.jackrabbit.oak.segment.azure.v8; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.StorageException; @@ -39,14 +39,14 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -public class AzureJournalFileConcurrencyIT { - private static final Logger log = LoggerFactory.getLogger(AzureJournalFileConcurrencyIT.class); +public class AzureJournalFileV8ConcurrencyIT { + private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8ConcurrencyIT.class); private static CloudBlobContainer container; private static int suffix; - private AzurePersistence persistence; + private AzurePersistenceV8 persistence; @BeforeClass public static void connectToAzure() throws URISyntaxException, InvalidKeyException, StorageException { @@ -60,7 +60,7 @@ public static void connectToAzure() throws URISyntaxException, InvalidKeyExcepti @Before public void setup() throws StorageException, InvalidKeyException, URISyntaxException, IOException, InterruptedException { - persistence = new AzurePersistence(container.getDirectoryReference("oak-" + (suffix++))); + persistence 
= new AzurePersistenceV8(container.getDirectoryReference("oak-" + (suffix++))); writeJournalLines(300, 0); log.info("Finished writing initial content to journal!"); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java new file mode 100644 index 00000000000..6496ca239e2 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudAppendBlob;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.ListBlobItem;
import java.util.stream.IntStream;
import org.apache.commons.lang3.time.StopWatch;
import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader;
import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter;
import org.jetbrains.annotations.NotNull;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import java.io.IOException;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;
import java.util.ArrayList;
import java.util.List;

import static org.apache.jackrabbit.guava.common.collect.Lists.reverse;
import static java.util.stream.Collectors.toList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
 * Tests for {@link AzureJournalFileV8}: journal splitting into multiple append
 * blobs, truncation, and the batched write path.
 *
 * <p>The journal under test is created with a small line limit (50) so that the
 * blob-splitting behaviour can be exercised with few writes. Runs against an
 * Azurite container provided by {@link AzuriteDockerRule}.</p>
 */
public class AzureJournalFileV8Test {

    @ClassRule
    public static AzuriteDockerRule azurite = new AzuriteDockerRule();

    private CloudBlobContainer container;

    private AzureJournalFileV8 journal;

    @Before
    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
        container = azurite.getContainer("oak-test");
        WriteAccessController writeAccessController = new WriteAccessController();
        writeAccessController.enableWriting();
        // Split the journal after 50 lines so the tests below can trigger
        // multiple blobs with small amounts of data.
        journal = new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", writeAccessController, 50);
    }

    /**
     * Writing across the 50-line limit must create additional journal blobs,
     * while reading still yields every entry in order.
     */
    @Test
    public void testSplitJournalFiles() throws IOException, URISyntaxException, StorageException {
        assertFalse(journal.exists());

        int index = 0;
        index = writeNLines(index, 10); // 10
        assertTrue(journal.exists());
        assertEquals(1, countJournalBlobs());

        index = writeNLines(index, 20); // 30
        assertEquals(1, countJournalBlobs());

        index = writeNLines(index, 30); // 60
        assertEquals(2, countJournalBlobs());

        index = writeNLines(index, 100); // 160
        assertEquals(4, countJournalBlobs());

        assertJournalEntriesCount(index);
    }

    /** Counts the append blobs currently backing the journal. */
    private int countJournalBlobs() throws URISyntaxException, StorageException {
        List<CloudAppendBlob> result = new ArrayList<>();
        for (ListBlobItem b : container.getDirectoryReference("journal").listBlobs("journal.log")) {
            if (b instanceof CloudAppendBlob) {
                result.add((CloudAppendBlob) b);
            }
        }
        return result.size();
    }

    /**
     * Writes {@code n} sequential "line &lt;i&gt;" entries starting at
     * {@code index} and returns the next unused index.
     */
    private int writeNLines(int index, int n) throws IOException {
        try (JournalFileWriter writer = journal.openJournalWriter()) {
            for (int i = 0; i < n; i++) {
                writer.writeLine("line " + (index++));
            }
        }
        return index;
    }

    /** Truncating must leave an existing-but-empty journal. */
    @Test
    public void testTruncateJournalFile() throws IOException {
        assertFalse(journal.exists());

        List<String> lines = buildLines(0, 100);
        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(lines);
        }

        assertTrue(journal.exists());
        assertJournalEntriesCount(100);

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.truncate();
        }

        assertTrue(journal.exists());
        assertJournalEntriesCount(0);
    }

    /** Batch writes must round-trip: the reader yields entries newest-first. */
    @Test
    public void testBatchWriteLines() throws IOException {
        List<String> lines = buildLines(0, 5000);

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(lines);
        }

        List<String> entries = readEntriesFromJournal();
        assertEquals(lines, reverse(entries));
    }

    /**
     * Sanity performance check: the batched implementation must beat a
     * line-by-line loop by more than 10x.
     *
     * <p>NOTE(review): wall-clock comparisons against a live (Azurite) endpoint
     * can be flaky on a loaded CI host; the 10x margin is the mitigation.</p>
     */
    @Test
    public void testEnsureBatchWriteLinesIsFasterThanNaiveImplementation() throws IOException {
        List<String> lines = buildLines(0, 100);

        StopWatch watchNaiveImpl = StopWatch.createStarted();
        try (JournalFileWriter writer = journal.openJournalWriter()) {
            // Emulating previous naive implementation of 'batchWriteLines', which simply delegated to 'writeLine()'
            for (String line : lines) {
                writer.writeLine(line);
            }
        }
        watchNaiveImpl.stop();

        StopWatch watchOptimizedImpl = StopWatch.createStarted();
        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(lines);
        }
        watchOptimizedImpl.stop();
        long optimizedImplTime = watchOptimizedImpl.getTime();
        long naiveImplTime = watchNaiveImpl.getTime();
        assertTrue("batchWriteLines() should be significantly faster (>10x) than the naive implementation, but took "
                + optimizedImplTime + "ms while naive implementation took " + naiveImplTime + "ms", optimizedImplTime < naiveImplTime / 10);
    }

    /** Batch writes must honour the same blob-splitting rules as single writes. */
    @Test
    public void testBatchWriteLines_splitJournalFile() throws Exception {
        assertFalse(journal.exists());

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(buildLines(0, 30)); // 30
        }
        assertTrue(journal.exists());
        assertEquals(1, countJournalBlobs());

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(buildLines(30, 40)); // 70
        }
        assertEquals(2, countJournalBlobs());

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(buildLines(70, 30)); // 100
        }
        assertEquals(2, countJournalBlobs());

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(buildLines(100, 1)); // 101
        }
        assertEquals(3, countJournalBlobs());

        try (JournalFileWriter writer = journal.openJournalWriter()) {
            writer.batchWriteLines(buildLines(101, 100)); // 201
        }
        assertEquals(5, countJournalBlobs());

        assertJournalEntriesCount(201);
    }

    /** Asserts the journal contains exactly entries 0..index-1 (reader is newest-first). */
    private void assertJournalEntriesCount(int index) throws IOException {
        List<String> entries = readEntriesFromJournal();
        assertEquals(buildLines(0, index), reverse(entries));
    }

    /** Builds {@code count} lines "line start" .. "line start+count-1". */
    @NotNull
    private static List<String> buildLines(int start, int count) {
        return IntStream.range(start, count + start)
                .mapToObj(i -> "line " + i)
                .collect(toList());
    }

    /** Drains the journal reader into a list (journal order: newest first). */
    @NotNull
    private List<String> readEntriesFromJournal() throws IOException {
        List<String> result = new ArrayList<>();
        try (JournalFileReader reader = journal.openJournalReader()) {
            String entry;
            while ((entry = reader.readLine()) != null) {
                result.add(entry);
            }
        }
        return result;
    }
}
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobContainer;

import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import java.io.IOException;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;
import java.util.Properties;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

/**
 * Round-trip test for the manifest file exposed by {@link AzurePersistenceV8},
 * backed by an Azurite container.
 */
public class AzureManifestFileV8Test {

    @ClassRule
    public static AzuriteDockerRule azurite = new AzuriteDockerRule();

    private CloudBlobContainer container;

    @Before
    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
        container = azurite.getContainer("oak-test");
    }

    /** Saving properties and loading them back must yield an equal set. */
    @Test
    public void testManifest() throws URISyntaxException, IOException {
        ManifestFile manifest = new AzurePersistenceV8(container.getDirectoryReference("oak")).getManifestFile();

        // A fresh store has no manifest yet.
        assertFalse(manifest.exists());

        Properties expected = new Properties();
        expected.setProperty("xyz", "abc");
        expected.setProperty("version", "123");
        manifest.save(expected);

        Properties actual = manifest.load();
        assertEquals(expected, actual);
    }

}
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;

import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.SegmentId;
import org.apache.jackrabbit.oak.segment.SegmentNotFoundException;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException;
import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;
import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;

/**
 * Verifies how segment reads fail when backed by Azure storage:
 * a missing segment on a reachable store surfaces as
 * {@link SegmentNotFoundException}, while an unreachable store surfaces as
 * {@link RepositoryNotReachableException}. Uses Azurite for the reachable case
 * and a failure-injecting persistence subclass for the unreachable case.
 */
public class AzureReadSegmentV8Test {

    @ClassRule
    public static AzuriteDockerRule azurite = new AzuriteDockerRule();

    private CloudBlobContainer container;

    @Before
    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
        container = azurite.getContainer("oak-test");
    }

    /** Reading a segment that was never written must throw SegmentNotFoundException. */
    @Test(expected = SegmentNotFoundException.class)
    public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException {
        AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak"));
        FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build();
        // msb/lsb = 0/0: an id that is guaranteed not to exist in a fresh store.
        SegmentId id = new SegmentId(fs, 0, 0);

        try {
            fs.readSegment(id);
        } finally {
            fs.close();
        }
    }

    /**
     * Reading a segment that exists but whose storage is unreachable must throw
     * RepositoryNotReachableException (not SegmentNotFoundException).
     */
    @Test(expected = RepositoryNotReachableException.class)
    public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException {
        // Persistence whose readers/writers always fail on readSegment().
        AzurePersistenceV8 p = new ReadFailingAzurePersistenceV8(container.getDirectoryReference("oak"));
        FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build();

        SegmentId id = new SegmentId(fs, 0, 0);
        byte[] buffer = new byte[2];

        try {
            // Write succeeds; the subsequent read is the injected failure.
            fs.writeSegment(id, buffer, 0, 2);
            fs.readSegment(id);
        } finally {
            fs.close();
        }
    }

    /**
     * Test double: an AzurePersistenceV8 whose archive readers and writers
     * throw RepositoryNotReachableException from readSegment(), simulating an
     * unreachable Azure endpoint while all other operations still work.
     */
    static class ReadFailingAzurePersistenceV8 extends AzurePersistenceV8 {
        public ReadFailingAzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) {
            super(segmentStoreDirectory);
        }

        @Override
        public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor,
                                                          FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) {
            return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController) {
                @Override
                public SegmentArchiveReader open(String archiveName) throws IOException {
                    CloudBlobDirectory archiveDirectory = getDirectory(archiveName);
                    return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor) {
                        @Override
                        public Buffer readSegment(long msb, long lsb) throws IOException {
                            // Simulate a network-level failure on every read.
                            throw new RepositoryNotReachableException(
                                    new RuntimeException("Cannot access Azure storage"));
                        }
                    };
                }

                @Override
                public SegmentArchiveWriter create(String archiveName) throws IOException {
                    CloudBlobDirectory archiveDirectory = getDirectory(archiveName);
                    return new AzureSegmentArchiveWriterV8(archiveDirectory, ioMonitor, fileStoreMonitor, writeAccessController) {
                        @Override
                        public Buffer readSegment(long msb, long lsb) throws IOException {
                            // Reads through the writer (unflushed segments) fail the same way.
                            throw new RepositoryNotReachableException(
                                    new RuntimeException("Cannot access Azure storage"));
                        }
                    };
                }
            };
        }
    }
}
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageErrorCodeStrings;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.ProvideSystemProperty;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeoutException;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Tests for {@link AzureRepositoryLockV8}: mutual exclusion via blob leases,
 * waiting for a lock holder to release, lease-renewal failure handling, and
 * the delayed blocking of writes after repeated renewal failures.
 *
 * <p>Lease duration / renewal interval / write-block delay are shortened via
 * system properties (see the {@link ProvideSystemProperty} rule), so several
 * tests are timing-sensitive by design.</p>
 */
public class AzureRepositoryLockV8Test {

    private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8Test.class);

    // Shortened timings (seconds) so lease expiry can be observed in-test.
    public static final String LEASE_DURATION = "15";
    public static final String RENEWAL_INTERVAL = "3";
    public static final String TIME_TO_WAIT_BEFORE_BLOCK = "9";

    @ClassRule
    public static AzuriteDockerRule azurite = new AzuriteDockerRule();

    private CloudBlobContainer container;

    @Before
    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
        container = azurite.getContainer("oak-test");
    }

    @Rule
    public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, LEASE_DURATION)
            .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, RENEWAL_INTERVAL)
            .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_BLOCK);

    /** A second lock attempt on an already-leased blob must fail with IOException. */
    @Test
    public void testFailingLock() throws URISyntaxException, IOException, StorageException {
        CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock");
        new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock();
        try {
            new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock();
            fail("The second lock should fail.");
        } catch (IOException e) {
            // it's fine
        }
    }

    /** A lock attempt with a timeout must succeed once the first holder unlocks. */
    @Test
    public void testWaitingLock() throws URISyntaxException, IOException, StorageException, InterruptedException {
        CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock");
        Semaphore s = new Semaphore(0);
        new Thread(() -> {
            try {
                RepositoryLock lock = new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock();
                s.release(); // signal: lock is held
                Thread.sleep(1000);
                lock.unlock();
            } catch (Exception e) {
                log.error("Can't lock or unlock the repo", e);
            }
        }).start();

        s.acquire(); // wait until the background thread holds the lock
        // 10-second timeout: long enough to outlive the 1s hold above.
        new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController(), 10).lock();
    }

    /**
     * If lease renewal keeps failing until the lease expires, the lock is lost:
     * once the mock behaves again, a competing lock still cannot be acquired
     * through the broken instance, and the original holder's renewal has failed.
     */
    @Test
    public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageException, IOException, InterruptedException {
        CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock");

        CloudBlockBlob blobMocked = Mockito.spy(blob);

        // instrument the mock to throw the exception twice when renewing the lease
        StorageException storageException =
                new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException());
        Mockito.doThrow(storageException)
                .doThrow(storageException)
                .doCallRealMethod()
                .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());

        new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock();

        // wait till lease expires (LEASE_DURATION is 15s)
        Thread.sleep(16000);

        // reset the mock to default behaviour
        Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());

        try {
            new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock();
            fail("The second lock should fail.");
        } catch (IOException e) {
            // it's fine
        }
    }

    /**
     * Writes must only be blocked after renewal has been failing for
     * TIME_TO_WAIT_BEFORE_BLOCK (9s) — not on the first failed renewal.
     */
    @Test
    public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception {

        CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock");

        CloudBlockBlob blobMocked = Mockito.spy(blob);

        // first renewal succeeds, every later one times out
        StorageException storageException =
                new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException());
        Mockito
                .doCallRealMethod()
                .doThrow(storageException)
                .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());


        WriteAccessController writeAccessController = new WriteAccessController();

        new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController).lock();

        // NOTE(review): this busy-waiting probe thread is never joined or
        // interrupted; it keeps running after the test — confirm whether the
        // surrounding harness tolerates that.
        Thread thread = new Thread(() -> {

            while (true) {
                writeAccessController.checkWritingAllowed();

            }
        });

        thread.start();

        Thread.sleep(3000);
        assertFalse("after 3 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING));

        Thread.sleep(3000);
        assertFalse("after 6 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING));

        Thread.sleep(5000);
        assertTrue("after more than 9 seconds thread should be in a waiting state", thread.getState().equals(Thread.State.WAITING));

        Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());
    }
}
package org.apache.jackrabbit.oak.segment.azure.v8;

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
import org.jetbrains.annotations.NotNull;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.mockserver.client.MockServerClient;
import org.mockserver.junit.MockServerRule;
import org.mockserver.matchers.Times;
import org.mockserver.model.BinaryBody;
import org.mockserver.model.HttpRequest;
// NOTE(review): URIBuilder comes from MockServer's shaded httpclient; verify
// this relocated package name survives MockServer upgrades.
import shaded_package.org.apache.http.client.utils.URIBuilder;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.UUID;

import static org.junit.Assert.assertThrows;
import static org.mockserver.model.HttpRequest.request;
import static org.mockserver.model.HttpResponse.response;
import static org.mockserver.verify.VerificationTimes.exactly;

/**
 * Verifies the retry behaviour of {@code AzureSegmentArchiveWriterV8} against a
 * MockServer-backed Azure endpoint: transient HTTP 500s on writing binary
 * references, the graph, or the "closed" marker are retried up to
 * {@link #MAX_ATTEMPTS} times before failing.
 */
public class AzureSegmentArchiveWriterV8Test {
    public static final String BASE_PATH = "/devstoreaccount1/oak-test";
    public static final int MAX_ATTEMPTS = 3;

    // System properties toggled for the duration of this test class.
    private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts";
    private static final String TIMEOUT_EXECUTION = "segment.timeout.execution";
    private static final String RETRY_INTERVAL_MS = "azure.segment.archive.writer.retries.intervalMs";
    private static final String WRITE_RETRY_ATTEMPTS = "azure.segment.archive.writer.retries.max";

    @Rule
    public MockServerRule mockServerRule = new MockServerRule(this);

    @SuppressWarnings("unused")
    private MockServerClient mockServerClient; // injected by MockServerRule

    private CloudBlobContainer container;

    @Before
    public void setUp() throws Exception {
        container = createCloudBlobContainer();

        System.setProperty(RETRY_INTERVAL_MS, "100");
        System.setProperty(WRITE_RETRY_ATTEMPTS, Integer.toString(MAX_ATTEMPTS));

        // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter
        System.setProperty(RETRY_ATTEMPTS, "0");
        System.setProperty(TIMEOUT_EXECUTION, "1");
    }

    @AfterClass
    public static void setDown() {
        // resetting the values for the properties set in setUp(). otherwise these will apply to all the tests that are executed after
        System.clearProperty(RETRY_ATTEMPTS);
        System.clearProperty(TIMEOUT_EXECUTION);
        System.clearProperty(RETRY_INTERVAL_MS);
        System.clearProperty(WRITE_RETRY_ATTEMPTS);
    }

    /** Two 500s followed by a 201 must succeed on the third attempt. */
    @Test
    public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws Exception {
        SegmentArchiveWriter writer = createSegmentArchiveWriter();
        writeAndFlushSegment(writer);

        HttpRequest writeBinaryReferencesRequest = getWriteBinaryReferencesRequest();
        // fail twice
        mockServerClient
                .when(writeBinaryReferencesRequest, Times.exactly(2))
                .respond(response().withStatusCode(500));
        // then succeed
        mockServerClient
                .when(writeBinaryReferencesRequest, Times.once())
                .respond(response().withStatusCode(201));

        writer.writeBinaryReferences(new byte[10]);

        mockServerClient.verify(writeBinaryReferencesRequest, exactly(MAX_ATTEMPTS));
    }

    /** Same retry pattern for the graph upload. */
    @Test
    public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception {
        SegmentArchiveWriter writer = createSegmentArchiveWriter();
        writeAndFlushSegment(writer);

        HttpRequest writeGraphRequest = getWriteGraphRequest();
        // fail twice
        mockServerClient
                .when(writeGraphRequest, Times.exactly(2))
                .respond(response().withStatusCode(500));
        // then succeed
        mockServerClient
                .when(writeGraphRequest, Times.once())
                .respond(response().withStatusCode(201));

        writer.writeGraph(new byte[10]);

        mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS));
    }

    /** Same retry pattern for writing the archive's "closed" marker. */
    @Test
    public void retryWhenFailureOnClose_eventuallySucceed() throws Exception {
        SegmentArchiveWriter writer = createSegmentArchiveWriter();
        writeAndFlushSegment(writer);

        HttpRequest closeArchiveRequest = getCloseArchiveRequest();
        // fail twice
        mockServerClient
                .when(closeArchiveRequest, Times.exactly(2))
                .respond(response().withStatusCode(500));
        // then succeed
        mockServerClient
                .when(closeArchiveRequest, Times.once())
                .respond(response().withStatusCode(201));

        writer.close();

        mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS));
    }

    /** With the server always failing, close() must give up after MAX_ATTEMPTS. */
    @Test
    public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception {
        SegmentArchiveWriter writer = createSegmentArchiveWriter();
        writeAndFlushSegment(writer);

        HttpRequest closeArchiveRequest = getCloseArchiveRequest();
        // always fail
        mockServerClient
                .when(closeArchiveRequest, Times.unlimited())
                .respond(response().withStatusCode(500));


        assertThrows(IOException.class, writer::close);

        mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS));
    }


    /** Writes one 10-byte segment and flushes it, with the expected HTTP calls stubbed. */
    private void writeAndFlushSegment(SegmentArchiveWriter writer) throws IOException {
        expectWriteRequests();
        UUID u = UUID.randomUUID();
        writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
        writer.flush();
    }

    /** Stubs the segment-data upload (201) and metadata update (200). */
    private void expectWriteRequests() {
        mockServerClient
                .when(getUploadSegmentDataRequest(), Times.once())
                .respond(response().withStatusCode(201));

        mockServerClient
                .when(getUploadSegmentMetadataRequest(), Times.once())
                .respond(response().withStatusCode(200));
    }

    /** Creates a writer for archive "data00000a.tar" with writing enabled. */
    @NotNull
    private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException {
        WriteAccessController writeAccessController = new WriteAccessController();
        writeAccessController.enableWriting();
        AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak"));
        azurePersistenceV8.setWriteAccessController(writeAccessController);
        SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter());
        return manager.create("data00000a.tar");
    }

    private static HttpRequest getCloseArchiveRequest() {
        return request()
                .withMethod("PUT")
                .withPath(BASE_PATH + "/oak/data00000a.tar/closed");
    }

    private static HttpRequest getWriteBinaryReferencesRequest() {
        return request()
                .withMethod("PUT")
                .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.brf");
    }

    private static HttpRequest getWriteGraphRequest() {
        return request()
                .withMethod("PUT")
                .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.gph");
    }

    private static HttpRequest getUploadSegmentMetadataRequest() {
        return request()
                .withMethod("PUT")
                .withPath(BASE_PATH + "/oak/data00000a.tar/.*")
                .withQueryStringParameter("comp", "metadata");
    }

    private static HttpRequest getUploadSegmentDataRequest() {
        return request()
                .withMethod("PUT")
                .withPath(BASE_PATH + "/oak/data00000a.tar/.*")
                .withBody(new BinaryBody(new byte[10]));
    }

    /** Points a CloudBlobContainer at the MockServer endpoint. */
    @NotNull
    private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException {
        URI uri = new URIBuilder()
                .setScheme("http")
                .setHost(mockServerClient.remoteAddress().getHostName())
                .setPort(mockServerClient.remoteAddress().getPort())
                .setPath(BASE_PATH)
                .build();

        return new CloudBlobContainer(uri);
    }
}
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java new file mode 100644 index 00000000000..0b8b84415a0 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java @@ -0,0 +1,286 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import org.apache.jackrabbit.guava.common.collect.ImmutableSet; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.*; +import java.io.IOException; +import java.net.URISyntaxException; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Set; +import java.util.stream.StreamSupport; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.azure.AzureSegmentStoreService; +import org.apache.jackrabbit.oak.segment.azure.Configuration; +import org.apache.jackrabbit.oak.segment.azure.util.Environment; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.sling.testing.mock.osgi.junit.OsgiContext; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.osgi.util.converter.Converters; + +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; + +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; +import static java.util.stream.Collectors.toSet; +import static org.junit.Assert.assertEquals; 
+import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeNotNull; + +public class AzureSegmentStoreV8Test { + private static final Environment ENVIRONMENT = new Environment(); + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public final OsgiContext context = new OsgiContext(); + + private static final EnumSet READ_ONLY = EnumSet.of(READ, LIST); + private static final EnumSet READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); + private static final ImmutableSet BLOBS = ImmutableSet.of("blob1", "blob2"); + + private CloudBlobContainer container; + + @Before + public void setup() throws Exception { + container = azurite.getContainer(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME); + for (String blob : BLOBS) { + container.getBlockBlobReference(blob + ".txt").uploadText(blob); + } + } + + @Test + public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { + String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessNotGranted(persistence); + assertReadAccessGranted(persistence, BLOBS); + } + + @Test + public void connectWithSharedAccessSignatureURL_readWrite() throws Exception { + String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = 
context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithSharedAccessSignatureURL_expired() throws Exception { + SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday()); + String sasToken = container.generateSharedAccessSignature(expiredPolicy, null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessNotGranted(persistence); + assertReadAccessNotGranted(persistence); + } + + @Test + public void connectWithAccessKey() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithConnectionURL() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithConfigurationURL(AzuriteDockerRule.ACCOUNT_KEY)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithServicePrincipal() throws Exception { + // Note: 
make sure blob1.txt and blob2.txt are uploaded to + // AZURE_ACCOUNT_NAME/oak before running this test + + assumeNotNull(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_TENANT_ID)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_ID)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET)); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + String accountName = ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME); + String tenantId = ENVIRONMENT.getVariable(AZURE_TENANT_ID); + String clientId = ENVIRONMENT.getVariable(AZURE_CLIENT_ID); + String clientSecret = ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithServicePrincipal(accountName, clientId, clientSecret, tenantId)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void deactivate() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); + assertNotNull(context.getService(SegmentNodeStorePersistence.class)); + + azureSegmentStoreService.deactivate(); + assertNull(context.getService(SegmentNodeStorePersistence.class)); + } + + @NotNull + private static SharedAccessBlobPolicy policy(EnumSet permissions, Instant expirationTime) { + SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy(); + sharedAccessBlobPolicy.setPermissions(permissions); + sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime)); + return sharedAccessBlobPolicy; + } + + @NotNull + private static SharedAccessBlobPolicy policy(EnumSet permissions) { + return 
policy(permissions, Instant.now().plus(Duration.ofDays(7))); + } + + private static void assertReadAccessGranted(SegmentNodeStorePersistence persistence, Set expectedBlobs) throws Exception { + CloudBlobContainer container = getContainerFrom(persistence); + Set actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false) + .map(blob -> blob.getUri().getPath()) + .map(path -> path.substring(path.lastIndexOf('/') + 1)) + .filter(name -> name.equals("test.txt") || name.startsWith("blob")) + .collect(toSet()); + Set expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet()); + + assertEquals(expectedBlobNames, actualBlobNames); + + Set actualBlobContent = actualBlobNames.stream() + .map(name -> { + try { + return container.getBlockBlobReference(name).downloadText(); + } catch (StorageException | IOException | URISyntaxException e) { + throw new RuntimeException("Error while reading blob " + name, e); + } + }) + .collect(toSet()); + assertEquals(expectedBlobs, actualBlobContent); + } + + private static void assertWriteAccessGranted(SegmentNodeStorePersistence persistence) throws Exception { + getContainerFrom(persistence) + .getBlockBlobReference("test.txt").uploadText("test"); + } + + private static CloudBlobContainer getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception { + return ((AzurePersistenceV8) persistence).getSegmentstoreDirectory().getContainer(); + } + + private static void assertWriteAccessNotGranted(SegmentNodeStorePersistence persistence) { + try { + assertWriteAccessGranted(persistence); + fail("Write access should not be granted, but writing to the storage succeeded."); + } catch (Exception e) { + // successful + } + } + + private static void assertReadAccessNotGranted(SegmentNodeStorePersistence persistence) { + try { + assertReadAccessGranted(persistence, BLOBS); + fail("Read access should not be granted, but reading from the storage succeeded."); + } catch (Exception e) { + // 
successful + } + } + + private static Instant yesterday() { + return Instant.now().minus(Duration.ofDays(1)); + } + + private static ImmutableSet concat(ImmutableSet blobs, String element) { + return ImmutableSet.builder().addAll(blobs).add(element).build(); + } + + private static Configuration getConfigurationWithSharedAccessSignature(String sasToken) { + return getConfiguration(sasToken, AzuriteDockerRule.ACCOUNT_NAME, null, null, null, null, null); + } + + private static Configuration getConfigurationWithAccessKey(String accessKey) { + return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, accessKey, null, null, null, null); + } + + private static Configuration getConfigurationWithConfigurationURL(String accessKey) { + String connectionString = "DefaultEndpointsProtocol=https;" + + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';' + + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';' + + "AccountKey=" + accessKey + ';'; + return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, null, connectionString, null, null, null); + } + + private static Configuration getConfigurationWithServicePrincipal(String accountName, String clientId, String clientSecret, String tenantId) { + return getConfiguration(null, accountName, null, null, clientId, clientSecret, tenantId); + } + + @NotNull + private static Configuration getConfiguration(String sasToken, String accountName, String accessKey, String connectionURL, String clientId, String clientSecret, String tenantId) { + return Converters.standardConverter() + .convert(new HashMap() {{ + put("accountName", accountName); + put("accessKey", accessKey); + put("connectionURL", connectionURL); + put("sharedAccessSignature", sasToken); + put("clientId", clientId); + put("clientSecret", clientSecret); + put("tenantId", tenantId); + put("blobEndpoint", azurite.getBlobEndpoint()); + }}) + .to(Configuration.class); + } +} diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java new file mode 100644 index 00000000000..55d0d270a6e --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureTarFileV8Test extends TarFileTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + @Override + public void setUp() throws IOException { + try { + container = azurite.getContainer("oak-test"); + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8.setWriteAccessController(writeAccessController); + archiveManager = azurePersistenceV8.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + } catch (StorageException | InvalidKeyException | URISyntaxException e) { + throw new IOException(e); + } + } + + @Override + protected long getWriteAndReadExpectedSize() { + return 45; + } + + @Test + @Ignore + @Override + public void graphShouldBeTrimmedDownOnSweep() throws Exception { + super.graphShouldBeTrimmedDownOnSweep(); + } +} \ No newline at end of file diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java new file mode 100644 index 00000000000..d17e3862001 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; +import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.junit.Before; +import org.junit.ClassRule; + +public class AzureTarFilesV8Test extends TarFilesTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + @Override + public void setUp() throws Exception { + container = azurite.getContainer("oak-test"); + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8.setWriteAccessController(writeAccessController); + tarFiles = TarFiles.builder() + .withDirectory(folder.newFolder()) + .withTarRecovery((id, data, recovery) -> { + // Intentionally left blank + }) + .withIOMonitor(new IOMonitorAdapter()) + .withFileStoreMonitor(new FileStoreMonitorAdapter()) + .withRemoteStoreMonitor(new RemoteStoreMonitorAdapter()) + .withMaxFileSize(MAX_FILE_SIZE) + .withPersistence(azurePersistenceV8) + .build(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java new file mode 100644 index 00000000000..18421c74e7c --- /dev/null +++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; + +public class AzureTarWriterV8Test extends TarWriterTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setUp() throws Exception { + container = azurite.getContainer("oak-test"); + } + + @NotNull + @Override + protected SegmentArchiveManager 
getSegmentArchiveManager() throws Exception { + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController); + return azureArchiveManagerV8; + } + + @NotNull + @Override + protected SegmentArchiveManager getFailingSegmentArchiveManager() throws Exception { + final WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + return new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController) { + @Override + public SegmentArchiveWriter create(String archiveName) throws IOException { + return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController) { + @Override + public void writeGraph(@NotNull byte[] data) throws IOException { + throw new IOException("test"); + } + }; + } + }; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java index 04d5fa0967b..4cdab554dcb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java @@ -16,25 +16,17 @@ */ package org.apache.jackrabbit.oak.segment.spi.persistence.split; -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.HashSet; -import java.util.Random; -import java.util.Set; - -import 
com.microsoft.azure.storage.StorageException; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -52,6 +44,15 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + import static org.junit.Assert.assertEquals; public class SplitPersistenceBlobTest { @@ -75,9 +76,13 @@ public class SplitPersistenceBlobTest { private SegmentNodeStorePersistence splitPersistence; @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { + BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + BlobContainerClient 
writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + BlobContainerClient noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + SegmentNodeStorePersistence sharedPersistence = - new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak")); + new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient,"oak"); File dataStoreDir = new File(folder.getRoot(), "blobstore"); BlobStore blobStore = newBlobStore(dataStoreDir); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java index 48db9845256..49c0ac6ae87 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java @@ -16,12 +16,13 @@ */ package org.apache.jackrabbit.oak.segment.spi.persistence.split; -import com.microsoft.azure.storage.StorageException; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -70,8 +71,12 @@ public class SplitPersistenceTest { private 
SegmentNodeStorePersistence splitPersistence; @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { - SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak")); + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { + BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + BlobContainerClient writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + BlobContainerClient noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + + SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient,"oak"); baseFileStore = FileStoreBuilder .fileStoreBuilder(folder.newFolder()) diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java new file mode 100644 index 00000000000..7b6f1d1e4bd --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + +import com.microsoft.azure.storage.StorageException; +import org.apache.jackrabbit.oak.api.Blob; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; +import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; +import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import 
org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.apache.jackrabbit.oak.spi.state.NodeStore; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.junit.Assert.assertEquals; + +public class SplitPersistenceBlobV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private SegmentNodeStore base; + + private SegmentNodeStore split; + + private FileStore baseFileStore; + + private FileStore splitFileStore; + + private String baseBlobId; + + private SegmentNodeStorePersistence splitPersistence; + + @Before + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + SegmentNodeStorePersistence sharedPersistence = + new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); + File dataStoreDir = new File(folder.getRoot(), "blobstore"); + BlobStore blobStore = newBlobStore(dataStoreDir); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .withBlobStore(blobStore) + .build(); + base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + NodeBuilder builder = base.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v1"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + baseBlobId = createLoad(base, baseFileStore).getContentIdentity(); + baseFileStore.flush(); + baseFileStore.close(); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .withBlobStore(blobStore) + .build(); + base = 
SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + createLoad(base, baseFileStore).getContentIdentity(); + baseFileStore.flush(); + + SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); + splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); + + splitFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(splitPersistence) + .withBlobStore(blobStore) + .build(); + split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); + } + + @After + public void tearDown() { + baseFileStore.close(); + } + + @Test + public void collectReferences() + throws IOException, CommitFailedException { + String blobId = createLoad(split, splitFileStore).getContentIdentity(); + + assertReferences(2, CollectionUtils.toSet(baseBlobId, blobId)); + } + + private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { + byte[] data = new byte[size]; + new Random().nextBytes(data); + return nodeStore.createBlob(new ByteArrayInputStream(data)); + } + + private static BlobStore newBlobStore(File directory) { + OakFileDataStore delegate = new OakFileDataStore(); + delegate.setPath(directory.getAbsolutePath()); + delegate.init(null); + return new DataStoreBlobStore(delegate); + } + + private Blob createLoad(SegmentNodeStore store, FileStore fileStore) + throws IOException, CommitFailedException { + NodeBuilder builder = store.getRoot().builder(); + Blob blob = createBlob(store, 18000); + builder.setProperty("bin", blob); + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fileStore.flush(); + return blob; + } + + private void assertReferences(int count, Set blobIds) + throws IOException { + Set actualReferences = new HashSet<>(); + splitFileStore.collectBlobReferences(actualReferences::add); + assertEquals("visible references different", count, actualReferences.size()); + assertEquals("Binary reference returned should be same", blobIds, actualReferences); 
+ } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java new file mode 100644 index 00000000000..a5af0abb47b --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; + +import com.microsoft.azure.storage.StorageException; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.file.tar.binaries.BinaryReferencesIndexLoader; +import org.apache.jackrabbit.oak.segment.file.tar.binaries.InvalidBinaryReferencesIndexException; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +import static 
org.junit.Assert.assertEquals; + +public class SplitPersistenceV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private SegmentNodeStore base; + + private SegmentNodeStore split; + + private FileStore baseFileStore; + + private FileStore splitFileStore; + + private SegmentNodeStorePersistence splitPersistence; + + @Before + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + SegmentNodeStorePersistence sharedPersistence = new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .build(); + base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + NodeBuilder builder = base.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v1"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + baseFileStore.flush(); + + SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); + splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); + + splitFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(splitPersistence) + .build(); + split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); + } + + @After + public void tearDown() { + if (splitFileStore != null) { + splitFileStore.close(); + } + + if (baseFileStore != null) { + baseFileStore.close(); + } + } + + @Test + public void testBaseNodeAvailable() { + assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testChangesAreLocalForBaseRepository() throws CommitFailedException { + NodeBuilder builder = base.getRoot().builder(); + 
builder.child("foo").child("bar").setProperty("version", "v2"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testChangesAreLocalForSplitRepository() throws CommitFailedException { + NodeBuilder builder = split.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v2"); + split.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + assertEquals("v1", base.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testBinaryReferencesAreNotNull() throws IOException, InvalidBinaryReferencesIndexException { + splitFileStore.close(); + splitFileStore = null; + + SegmentArchiveManager manager = splitPersistence.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + for (String archive : manager.listArchives()) { + SegmentArchiveReader reader = manager.open(archive); + BinaryReferencesIndexLoader.parseBinaryReferencesIndex(reader.getBinaryReferences()); + reader.close(); + } + } +} diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java index ae299321978..44ef7a4eba9 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java @@ -24,9 +24,9 @@ import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.jackrabbit.guava.common.io.Files; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import 
org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -56,7 +56,7 @@ public class SegmentAzureFactory implements NodeStoreFactory { private int segmentCacheSize; private final boolean readOnly; private static final Environment environment = new Environment(); - private AzureStorageCredentialManager azureStorageCredentialManager; + private AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public static class Builder { private final String dir; @@ -118,7 +118,7 @@ public SegmentAzureFactory(Builder builder) { @Override public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; try { azPersistence = createAzurePersistence(closer); } catch (StorageException | URISyntaxException | InvalidKeyException e) { @@ -152,34 +152,34 @@ public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { } } - private AzurePersistence createAzurePersistence(Closer closer) throws StorageException, URISyntaxException, InvalidKeyException { + private AzurePersistenceV8 createAzurePersistence(Closer closer) throws StorageException, URISyntaxException, InvalidKeyException { CloudBlobDirectory cloudBlobDirectory = null; // connection string will take precedence over accountkey / sas / service principal if (StringUtils.isNoneBlank(connectionString, containerName)) { - cloudBlobDirectory = AzureUtilities.cloudBlobDirectoryFrom(connectionString, containerName, dir); + cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(connectionString, 
containerName, dir); } else if (StringUtils.isNoneBlank(accountName, uri)) { StorageCredentials credentials = null; if (StringUtils.isNotBlank(sasToken)) { credentials = new StorageCredentialsSharedAccessSignature(sasToken); } else { - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); - credentials = azureStorageCredentialManager.getStorageCredentialsFromEnvironment(accountName, environment); - closer.register(azureStorageCredentialManager); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); + credentials = azureStorageCredentialManagerV8.getStorageCredentialsFromEnvironment(accountName, environment); + closer.register(azureStorageCredentialManagerV8); } - cloudBlobDirectory = AzureUtilities.cloudBlobDirectoryFrom(credentials, uri, dir); + cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(credentials, uri, dir); } if (cloudBlobDirectory == null) { throw new IllegalArgumentException("Could not connect to Azure storage. Too few connection parameters specified!"); } - return new AzurePersistence(cloudBlobDirectory); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override public boolean hasExternalBlobReferences() throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; Closer closer = Closer.create(); CliUtils.handleSigInt(closer); try { diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java index 1d42f0ee296..db8e92de54b 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java @@ -16,7 +16,7 @@ */ package org.apache.jackrabbit.oak.upgrade.cli; -import 
org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.upgrade.cli.container.NodeStoreContainer; import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentAzureServicePrincipalNodeStoreContainer; @@ -34,8 +34,8 @@ public class SegmentTarToSegmentAzureServicePrincipalTest extends AbstractOak2Oa @Override public void prepare() throws Exception { - assumeNotNull(ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilities.AZURE_TENANT_ID), - ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_SECRET)); + assumeNotNull(ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_TENANT_ID), + ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_SECRET)); skipTest = false; super.prepare(); } diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java index 5b99de8ad07..c7b69ec69db 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java @@ -23,8 +23,8 @@ import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import 
org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -76,9 +76,9 @@ private SegmentAzureNodeStoreContainer(AzuriteDockerRule azurite, BlobStoreConta @Override public NodeStore open() throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; try { - azPersistence = new AzurePersistence(container.getDirectoryReference(dir)); + azPersistence = new AzurePersistenceV8(container.getDirectoryReference(dir)); } catch (URISyntaxException e) { throw new IllegalStateException(e); } @@ -113,7 +113,7 @@ public void close() { @Override public void clean() throws IOException { try { - AzureUtilities.deleteAllEntries(container.getDirectoryReference(dir)); + AzureUtilitiesV8.deleteAllEntries(container.getDirectoryReference(dir)); } catch (URISyntaxException e) { throw new IOException(e); } diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java index c0619c60419..5a82f6ef9c1 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java @@ -19,9 +19,9 @@ import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.guava.common.io.Files; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import 
org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.file.FileStore; @@ -43,8 +43,8 @@ public class SegmentAzureServicePrincipalNodeStoreContainer implements NodeStore private final BlobStore blobStore; private FileStore fs; private File tmpDir; - private AzurePersistence azurePersistence; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private AzurePersistenceV8 azurePersistenceV8; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public SegmentAzureServicePrincipalNodeStoreContainer() { this(null); @@ -52,21 +52,21 @@ public SegmentAzureServicePrincipalNodeStoreContainer() { public SegmentAzureServicePrincipalNodeStoreContainer(BlobStore blobStore) { this.blobStore = blobStore; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @Override public NodeStore open() throws IOException { try { - azurePersistence = createAzurePersistence(); + azurePersistenceV8 = createAzurePersistence(); } catch (Exception e) { throw new IllegalStateException(e); } tmpDir = Files.createTempDir(); FileStoreBuilder builder = FileStoreBuilder.fileStoreBuilder(tmpDir) - .withCustomPersistence(azurePersistence).withMemoryMapping(false); + .withCustomPersistence(azurePersistenceV8).withMemoryMapping(false); if (blobStore != null) { builder.withBlobStore(blobStore); } @@ -80,14 +80,14 @@ public NodeStore open() throws IOException { return new FileStoreUtils.NodeStoreWithFileStore(SegmentNodeStoreBuilders.builder(fs).build(), fs); } - 
private AzurePersistence createAzurePersistence() { - if (azurePersistence != null) { - return azurePersistence; + private AzurePersistenceV8 createAzurePersistence() { + if (azurePersistenceV8 != null) { + return azurePersistenceV8; } - String path = String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), + String path = String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), CONTAINER_NAME, DIR); - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); - return new AzurePersistence(cloudBlobDirectory); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override @@ -99,16 +99,16 @@ public void close() { if (tmpDir != null) { tmpDir.delete(); } - if (azureStorageCredentialManager != null) { - azureStorageCredentialManager.close(); + if (azureStorageCredentialManagerV8 != null) { + azureStorageCredentialManagerV8.close(); } } @Override public void clean() throws IOException { - AzurePersistence azurePersistence = createAzurePersistence(); + AzurePersistenceV8 azurePersistenceV8 = createAzurePersistence(); try { - AzureUtilities.deleteAllBlobs(azurePersistence.getSegmentstoreDirectory()); + AzureUtilitiesV8.deleteAllBlobs(azurePersistenceV8.getSegmentstoreDirectory()); } catch (Exception e) { throw new IOException(e); } @@ -116,7 +116,7 @@ public void clean() throws IOException { @Override public String getDescription() { - return "az:" + String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), + return "az:" + String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), CONTAINER_NAME, DIR); } } \ No newline at end of file diff --git 
a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java index 00e10ddfe5f..691411c011d 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java @@ -25,8 +25,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.upgrade.cli.CliUtils; @@ -40,11 +40,11 @@ import java.util.Date; import java.util.EnumSet; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static 
org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assert.assertEquals; import static org.junit.Assume.assumeNotNull; import static org.junit.Assume.assumeTrue; @@ -109,7 +109,7 @@ public void testConnectionWithUri_accessKey() throws IOException { String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME); Closer closer = Closer.create(); - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { try { SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256, false) @@ -122,7 +122,7 @@ public void testConnectionWithUri_accessKey() throws IOException { assertEquals(1, nodeStore.getFileStore().getSegmentCount()); } finally { closer.close(); - cleanup(uri, azureStorageCredentialManager); + cleanup(uri, azureStorageCredentialManagerV8); } } } @@ -136,7 +136,7 @@ public void testConnectionWithUri_servicePrincipal() throws IOException, Interru String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME); Closer closer = Closer.create(); - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { try { SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256, false) @@ -149,16 +149,16 @@ public void testConnectionWithUri_servicePrincipal() throws IOException, Interru assertEquals(1, nodeStore.getFileStore().getSegmentCount()); } finally { closer.close(); - cleanup(uri, azureStorageCredentialManager); + cleanup(uri, azureStorageCredentialManagerV8); } } } - private void 
cleanup(String uri, AzureStorageCredentialManager azureStorageCredentialManager) { + private void cleanup(String uri, AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { uri = uri + "/" + DIR; try { - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(uri, ENVIRONMENT, azureStorageCredentialManager); - AzureUtilities.deleteAllBlobs(cloudBlobDirectory); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(uri, ENVIRONMENT, azureStorageCredentialManagerV8); + AzureUtilitiesV8.deleteAllBlobs(cloudBlobDirectory); } catch (Exception e) { throw new IllegalStateException(e); }